Dataset schema:

  column                   type     range
  code                     string   length 86 to 54.5k
  code_codestyle           int64    0 to 371
  style_context            string   length 87 to 49.2k
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) _a = { '''configuration_speecht5''': [ '''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''', '''SpeechT5Config''', '''SpeechT5HifiGanConfig''', ], '''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''], '''processing_speecht5''': ['''SpeechT5Processor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['''SpeechT5Tokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SpeechT5ForSpeechToText''', '''SpeechT5ForSpeechToSpeech''', '''SpeechT5ForTextToSpeech''', '''SpeechT5Model''', '''SpeechT5PreTrainedModel''', '''SpeechT5HifiGan''', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 322
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
style_context_codestyle: 322
label: 1
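The `code` field of the row above is an obfuscated copy of the SpeechT5 `__init__.py`, which registers its exports in an import-structure dict and defers the real imports to `_LazyModule`. Below is a minimal sketch of that deferred-import pattern; the class name and structure are my own, not the transformers internals.

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported attributes to their defining submodule on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._attr_to_module = {
            attr: module
            for module, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# Demo with a stdlib package: json.decoder is imported only on first access.
lazy_json = LazyModule("json", {"decoder": ["JSONDecoder"]})
print(lazy_json.JSONDecoder)
```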
'''simple docstring''' def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" A = [False] * len(A_ ) A = [] queue.append(A_ ) A = True while queue: A = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(A_ ) A = True A = u return visited[t] def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" A = [-1] * (len(A_ )) A = 0 while bfs(A_ , A_ , A_ , A_ ): A = float("""Inf""" ) A = sink while s != source: # Find the minimum value in select path A = min(A_ , graph[parent[s]][s] ) A = parent[s] max_flow += path_flow A = sink while v != source: A = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow A = parent[v] return max_flow _lowerCamelCase : Optional[int] = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _lowerCamelCase : Tuple = 0, 5 print(ford_fulkerson(graph, source, sink))
code_codestyle: 358
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 _lowerCamelCase : Any = { # 1536-bit 5: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AACAA68FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199' + 'FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08' + '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B' + '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9' + 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6' + '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8' + 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C' + '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718' + '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D' + '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D' + 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226' + '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC' + 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26' + '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB' + '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2' + '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127' + 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406' + 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918' + 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151' + '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03' + 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F' + 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B' + 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632' + '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E' + '6DCC4024FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD' + 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831' + '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B' + 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF' + '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6' + 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3' + '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328' + '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C' + 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE' + '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4' + '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300' + '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568' + 
'3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9' + '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B' + '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A' + '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36' + '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1' + 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92' + '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47' + '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71' + '60C980DD98EDD3DFFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, } class __UpperCAmelCase : '''simple docstring''' def __init__(self : int , _lowerCAmelCase : int = 14 ): if group not in primes: raise ValueError("""Unsupported Group""" ) A = primes[group]["""prime"""] A = primes[group]["""generator"""] A = int(hexlify(urandom(32 ) ) , base=16 ) def A (self : Optional[Any] ): return hex(self.__private_key )[2:] def A (self : Union[str, Any] ): A = pow(self.generator , self.__private_key , self.prime ) return hex(_lowerCAmelCase )[2:] def A (self : Any , _lowerCAmelCase : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(_lowerCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1 ) def A (self : List[str] , _lowerCAmelCase : str ): A = int(_lowerCAmelCase , base=16 ) if not self.is_valid_public_key(_lowerCAmelCase ): raise ValueError("""Invalid public key""" ) A = pow(_lowerCAmelCase , self.__private_key , self.prime ) return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest() @staticmethod def A (_lowerCAmelCase : int , _lowerCAmelCase : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(_lowerCAmelCase , (prime - 1) // 2 , _lowerCAmelCase ) == 1 ) @staticmethod def A (_lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int = 14 ): A = int(_lowerCAmelCase , base=16 ) A = int(_lowerCAmelCase , base=16 ) A = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError("""Invalid public key""" ) A = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 337
label: 0
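The `code` field of this row is an obfuscated Ford-Fulkerson maximum-flow implementation that finds augmenting paths with BFS (i.e. Edmonds-Karp). A de-obfuscated sketch, identifier names mine, same logic:

```python
from collections import deque


def bfs(graph, source, sink, parent):
    """Return True if an augmenting path from source to sink exists."""
    visited = [False] * len(graph)
    visited[source] = True
    queue = deque([source])
    while queue:
        u = queue.popleft()
        for v, capacity in enumerate(graph[u]):
            if not visited[v] and capacity > 0:
                visited[v] = True
                parent[v] = u
                queue.append(v)
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Maximum flow on a capacity matrix via BFS augmenting paths."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path.
        path_flow = float("inf")
        v = sink
        while v != source:
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        max_flow += path_flow
        # Update residual capacities along the path.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = u
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(ford_fulkerson(graph, 0, 5))  # 23
```

The `style_context` field is a Diffie-Hellman key exchange over the RFC 3526 MODP groups, hashing the shared secret with SHA-256. A toy sketch of the exchange follows; the 23/5 group is for illustration only, real use takes the 1536-bit-and-up primes quoted above, and the peer-key check here is simplified to a range check.

```python
from hashlib import sha256
from secrets import randbelow

# Toy group for illustration only; deployments use the RFC 3526 MODP primes.
PRIME = 23
GENERATOR = 5


class DiffieHellman:
    def __init__(self) -> None:
        self.__private_key = 2 + randbelow(PRIME - 3)  # in [2, PRIME - 2]

    def generate_public_key(self) -> int:
        return pow(GENERATOR, self.__private_key, PRIME)

    def generate_shared_key(self, remote_public_key: int) -> str:
        # Simplified sanity check on the peer's public key.
        if not 2 <= remote_public_key <= PRIME - 2:
            raise ValueError("Invalid public key")
        shared = pow(remote_public_key, self.__private_key, PRIME)
        return sha256(str(shared).encode()).hexdigest()


alice, bob = DiffieHellman(), DiffieHellman()
assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
    alice.generate_public_key()
)
```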
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase : Union[str, Any] = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['ViTFeatureExtractor'] UpperCAmelCase : List[str] = ['ViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'VIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTForImageClassification', 'ViTForMaskedImageModeling', 'ViTModel', 'ViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ 'TFViTForImageClassification', 'TFViTModel', 'TFViTPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ 'FlaxViTForImageClassification', 'FlaxViTModel', 'FlaxViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 280
def _a ( lowerCamelCase: dict ) -> bool: '''simple docstring''' __A = set() # To detect a back edge, keep track of vertices currently in the recursion stack __A = set() return any( node not in visited and depth_first_search(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) for node in graph ) def _a ( lowerCamelCase: dict , lowerCamelCase: int , lowerCamelCase: set , lowerCamelCase: set ) -> bool: '''simple docstring''' visited.add(lowerCamelCase ) rec_stk.add(lowerCamelCase ) for node in graph[vertex]: if node not in visited: if depth_first_search(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(lowerCamelCase ) return False if __name__ == "__main__": from doctest import testmod testmod()
style_context_codestyle: 117
label: 0
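The `style_context` field of this row detects cycles in a directed graph by tracking the DFS recursion stack: a back edge to a vertex on the current path means a cycle. A cleaned-up sketch of the same routine (names mine):

```python
def has_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited: set = set()
    rec_stack: set = set()  # vertices on the current DFS path
    return any(
        node not in visited and dfs(graph, node, visited, rec_stack)
        for node in graph
    )


def dfs(graph: dict, vertex, visited: set, rec_stack: set) -> bool:
    visited.add(vertex)
    rec_stack.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if dfs(graph, node, visited, rec_stack):
                return True
        elif node in rec_stack:
            return True  # back edge onto the current path: cycle found
    # Remove the vertex from the path before unwinding.
    rec_stack.remove(vertex)
    return False


print(has_cycle({0: [1], 1: [2], 2: [0]}))  # True
print(has_cycle({0: [1], 1: [2], 2: []}))   # False
```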
"""simple docstring""" import math def UpperCamelCase ( __magic_name__ : int ) -> Tuple: """simple docstring""" lowercase__ = 0 lowercase__ = 0 while num > 0: lowercase__ = num % 8 lowercase__ = octal + (remainder * math.floor(math.pow(10 , snake_case_ ) )) counter += 1 lowercase__ = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return f'''0o{int(snake_case_ )}''' def UpperCamelCase ( ) -> List[Any]: """simple docstring""" print("""\n2 in octal is:""" ) print(decimal_to_octal(2 ) ) # = 2 print("""\n8 in octal is:""" ) print(decimal_to_octal(8 ) ) # = 10 print("""\n65 in octal is:""" ) print(decimal_to_octal(65 ) ) # = 101 print("""\n216 in octal is:""" ) print(decimal_to_octal(216 ) ) # = 330 print("""\n512 in octal is:""" ) print(decimal_to_octal(512 ) ) # = 1000 print("""\n""" ) if __name__ == "__main__": main()
code_codestyle: 370
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate A : int = trt.Logger(trt.Logger.WARNING) A : Dict = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) A : Union[str, Any] = logging.getLogger(__name__) A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=3_8_4, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=1_2_8, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=2_0, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=3_0, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) A : List[Any] = parser.parse_args() if args.tokenizer_name: A : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) A : Optional[Any] = args.per_device_eval_batch_size A : Tuple = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties A : Any = True A : Optional[int] = 'temp_engine/bert-fp32.engine' if args.fpaa: A : Union[str, Any] = 'temp_engine/bert-fp16.engine' if args.inta: A : Optional[int] = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') A : List[str] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network A : List[str] = [network.get_input(i) for i in range(network.num_inputs)] A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: A : Union[str, Any] = 1 << 5_0 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) A : Dict = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) A : int = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) lowercase__ = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) lowercase__ = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __magic_name__ ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __magic_name__ ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __magic_name__ ) # start time lowercase__ = time.time() # Run inference context.execute_async( bindings=[int(__magic_name__ ) for d_inp in d_inputs] + [int(__magic_name__ ), int(__magic_name__ )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(__magic_name__ , __magic_name__ , __magic_name__ ) cuda.memcpy_dtoh_async(__magic_name__ , __magic_name__ , __magic_name__ ) # Synchronize the stream and take time 
stream.synchronize() # end time lowercase__ = time.time() lowercase__ = end_time - start_time lowercase__ = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. A : Dict = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. A : str = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. A : str = raw_datasets['validation'].column_names A : Any = 'question' if 'question' in column_names else column_names[0] A : int = 'context' if 'context' in column_names else column_names[1] A : Tuple = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). A : Dict = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) A : str = min(args.max_seq_length, tokenizer.model_max_length) def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. 
lowercase__ = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=__magic_name__ , stride=args.doc_stride , return_overflowing_tokens=__magic_name__ , return_offsets_mapping=__magic_name__ , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. lowercase__ = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. lowercase__ = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). lowercase__ = tokenized_examples.sequence_ids(__magic_name__ ) lowercase__ = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. lowercase__ = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. lowercase__ = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples A : Optional[Any] = raw_datasets['validation'] # Validation Feature Creation A : int = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) A : Dict = default_data_collator A : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) A : Optional[Any] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any]="eval" ) -> List[Any]: """simple docstring""" lowercase__ = postprocess_qa_predictions( examples=__magic_name__ , features=__magic_name__ , predictions=__magic_name__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__magic_name__ , ) # Format the result to the format the metric expects. if args.version_2_with_negative: lowercase__ = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: lowercase__ = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] lowercase__ = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=__magic_name__ , label_ids=__magic_name__ ) A : Union[str, Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! 
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def UpperCamelCase ( __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" return trt.volume(engine.get_binding_shape(__magic_name__ ) ) * engine.get_binding_dtype(__magic_name__ ).itemsize # Allocate device memory for inputs and outputs. A : Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) A : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) A : List[str] = cuda.mem_alloc(h_outputa.nbytes) A : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. A : Union[str, Any] = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(F' Num examples = {len(eval_dataset)}') logger.info(F' Batch size = {args.per_device_eval_batch_size}') A : List[Any] = 0.0 A : Any = 0 A : str = timeit.default_timer() A : Tuple = None for step, batch in enumerate(eval_dataloader): A , A : Optional[int] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 A , A : int = outputs A : str = torch.tensor(start_logits) A : int = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0) A : Tuple = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0) A : Any = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) A : str = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0) if all_preds is not None: A : List[str] = nested_truncate(all_preds, len(eval_dataset)) A : List[Any] = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0)) logger.info('Total Number of Inference = %d', niter) A : Dict = post_processing_function(eval_examples, eval_dataset, all_preds) A : Any = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'Evaluation metrics: {eval_metric}')
style_context_codestyle: 146
label: 0
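The `code` field of this row converts a decimal integer to octal by accumulating each base-8 digit at the matching power of ten. A readable sketch of the same conversion (names mine):

```python
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative integer to an octal string like '0o101'."""
    octal = 0
    place = 0
    while num > 0:
        remainder = num % 8
        octal += remainder * 10 ** place  # write the digit at its decimal place
        place += 1
        num //= 8
    return f"0o{octal}"


assert decimal_to_octal(65) == "0o101"
assert decimal_to_octal(216) == "0o330"
assert decimal_to_octal(512) == "0o1000"
```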
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def _a ( SCREAMING_SNAKE_CASE : dict ): """simple docstring""" return (data["data"], data["target"]) def _a ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ): """simple docstring""" UpperCamelCase__ : Dict = XGBClassifier() classifier.fit(__lowerCamelCase , __lowerCamelCase ) return classifier def _a ( ): """simple docstring""" UpperCamelCase__ : List[str] = load_iris() UpperCamelCase__ : str = data_handling(__lowerCamelCase ) UpperCamelCase__ : List[str] = train_test_split( __lowerCamelCase , __lowerCamelCase , test_size=0.25 ) UpperCamelCase__ : Optional[Any] = iris["""target_names"""] # Create an XGBoost Classifier from the training data UpperCamelCase__ : List[Any] = xgboost(__lowerCamelCase , __lowerCamelCase ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , display_labels=__lowerCamelCase , cmap='''Blues''' , normalize='''true''' , ) plt.title('''Normalized Confusion Matrix - IRIS Dataset''' ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
code_codestyle: 146
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : List[Any] = logging.get_logger(__name__) a : List[Any] = { "nvidia/segformer-b0-finetuned-ade-512-512": ( "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class a ( lowercase__ ): """simple docstring""" a : int = 'segformer' def __init__( self : Dict , __lowercase : str=3 , __lowercase : Dict=4 , __lowercase : Any=[2, 2, 2, 2] , __lowercase : Optional[int]=[8, 4, 2, 1] , __lowercase : List[str]=[32, 64, 160, 256] , __lowercase : Union[str, Any]=[7, 3, 3, 3] , __lowercase : Optional[int]=[4, 2, 2, 2] , __lowercase : Any=[1, 2, 5, 8] , __lowercase : List[str]=[4, 4, 4, 4] , __lowercase : Any="gelu" , __lowercase : Optional[int]=0.0 , __lowercase : Dict=0.0 , __lowercase : Optional[Any]=0.1 , __lowercase : int=0.02 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-6 , __lowercase : Tuple=256 , __lowercase : List[Any]=255 , **__lowercase : Union[str, Any] , ) -> List[Any]: super().__init__(**__lowercase ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , __lowercase , ) __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Any = num_encoder_blocks __UpperCAmelCase : List[Any] = depths __UpperCAmelCase : Dict = sr_ratios __UpperCAmelCase : int = hidden_sizes __UpperCAmelCase : Optional[Any] = patch_sizes __UpperCAmelCase : List[Any] = strides __UpperCAmelCase : List[str] = mlp_ratios __UpperCAmelCase : Tuple = num_attention_heads __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : Dict = attention_probs_dropout_prob __UpperCAmelCase : Any = classifier_dropout_prob __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Optional[Any] = drop_path_rate __UpperCAmelCase : int = layer_norm_eps __UpperCAmelCase : Dict = decoder_hidden_size __UpperCAmelCase : Tuple = kwargs.get("""reshape_last_stage""" , __lowercase ) __UpperCAmelCase : int = semantic_loss_ignore_index class a ( lowercase__ ): """simple docstring""" a : Union[str, Any] = version.parse('1.11' ) @property def UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase ( self : Optional[Any] ) -> float: return 1e-4 @property def UpperCAmelCase ( self : str ) -> int: return 12
style_context_codestyle: 114
label: 0
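The `code` field of this row trains an XGBoost classifier on the iris dataset and renders a normalized confusion matrix. A minimal sketch of the train/evaluate core, assuming `xgboost` and `scikit-learn` are installed; the plotting step is replaced here by a plain accuracy score:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

data = load_iris()
x_train, x_test, y_train, y_test = train_test_split(
    data["data"], data["target"], test_size=0.25
)
classifier = XGBClassifier()
classifier.fit(x_train, y_train)
print(classifier.score(x_test, y_test))  # mean accuracy on the held-out quarter
```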
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCAmelCase : Tuple = logging.get_logger(__name__) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = ["""pixel_values"""] def __init__( self :Optional[int] , snake_case :Any = True , snake_case :Any = None , snake_case :Any = PILImageResampling.BILINEAR , snake_case :Union[str, Any] = True , snake_case :Union[str, Any] = None , snake_case :Optional[int] = True , snake_case :Tuple = 1 / 255 , snake_case :Any = True , snake_case :Union[str, Any] = None , snake_case :Any = None , **snake_case :Tuple , ): '''simple docstring''' super().__init__(**_lowercase ) A_ : int = size if size is not None else {"shortest_edge": 256} A_ : Any = get_size_dict(_lowercase , default_to_square=_lowercase ) A_ : Optional[int] = crop_size if crop_size is not None else {"height": 224, "width": 224} A_ : List[Any] = get_size_dict(_lowercase ) A_ : Union[str, Any] = do_resize A_ : Dict = size A_ : Optional[int] = resample A_ : List[str] = do_center_crop A_ : Union[str, Any] = crop_size A_ : Optional[Any] = do_rescale A_ : Optional[int] = rescale_factor A_ : List[Any] = do_normalize A_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A_ : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :str , snake_case :Optional[Any] , snake_case :List[Any] = PILImageResampling.BICUBIC , snake_case :int = None , **snake_case :Optional[Any] , ): '''simple docstring''' A_ : Tuple = get_size_dict(_lowercase , default_to_square=_lowercase ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}" ) A_ : Tuple = get_resize_output_image_size(_lowercase , size=size["shortest_edge"] , default_to_square=_lowercase ) return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Tuple , snake_case :str , snake_case :Tuple = None , **snake_case :List[Any] , ): '''simple docstring''' A_ : Dict = get_size_dict(_lowercase ) return center_crop(_lowercase , size=(size["height"], size["width"]) , data_format=_lowercase , **_lowercase ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[str] , snake_case :List[Any] , snake_case :List[Any] = None , **snake_case :Optional[int] ): '''simple docstring''' return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[Any] , snake_case :Dict , snake_case :List[Any] , snake_case :List[Any] = None , **snake_case :Optional[int] , ): '''simple docstring''' return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Tuple , snake_case :Optional[Any] = None , snake_case :str = None , snake_case :Optional[int] = None , snake_case :Optional[Any] = None , snake_case :int = None , snake_case :Optional[int] = None , snake_case :Union[str, Any] = None , snake_case :List[Any] = None , snake_case :int = None , snake_case :int = None , snake_case :Union[str, Any] = None , snake_case :Optional[int] = ChannelDimension.FIRST , **snake_case :Union[str, Any] , ): '''simple docstring''' A_ : Any = do_resize if do_resize is not None else self.do_resize A_ : Union[str, Any] = size if size is not None else self.size A_ : Any = get_size_dict(_lowercase , default_to_square=_lowercase ) A_ : Dict = resample if resample is not None else self.resample A_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Optional[int] = crop_size if crop_size is not None else self.crop_size A_ : int = get_size_dict(_lowercase ) A_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize A_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean A_ : Any = image_std if image_std is not None else self.image_std A_ : List[Any] = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
A_ : Any = [to_numpy_array(_lowercase ) for image in images] if do_resize: A_ : Dict = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images] if do_center_crop: A_ : Any = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images] if do_rescale: A_ : Optional[int] = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images] if do_normalize: A_ : List[Any] = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images] A_ : int = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] A_ : Dict = {"pixel_values": images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
code_codestyle: 366
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() _lowerCAmelCase : Tuple = logging.get_logger('''transformers.models.speecht5''') _lowerCAmelCase : int = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } _lowerCAmelCase : str = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } _lowerCAmelCase : int = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } _lowerCAmelCase : Union[str, Any] = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } _lowerCAmelCase : Union[str, Any] = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } _lowerCAmelCase : int = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } _lowerCAmelCase : Any = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': 
'''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } _lowerCAmelCase : List[str] = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } _lowerCAmelCase : Optional[Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } _lowerCAmelCase : Dict = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _lowerCAmelCase : Union[str, Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : Tuple = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] _lowerCAmelCase : Tuple = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] _lowerCAmelCase : int = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] _lowerCAmelCase : Optional[int] = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ) -> Optional[Any]: for attribute in key.split("." 
): A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : Tuple = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : List[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : Dict = value elif weight_type == "weight_g": A_ : int = value elif weight_type == "weight_v": A_ : str = value elif weight_type == "bias": A_ : int = value elif weight_type == "running_mean": A_ : str = value elif weight_type == "running_var": A_ : Any = value elif weight_type == "num_batches_tracked": A_ : str = value else: A_ : int = value logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> Union[str, Any]: for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_ , A_ : Tuple = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: A_ : Tuple = [] if task == "s2t": A_ : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder A_ : str = MAPPING_S2T A_ : Union[str, Any] = IGNORE_KEYS_S2T elif task == "t2s": A_ : Optional[int] = None A_ : Dict = MAPPING_T2S A_ : Any = IGNORE_KEYS_T2S elif task == "s2s": A_ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder A_ : Dict = MAPPING_S2S A_ : List[str] = IGNORE_KEYS_S2S else: raise ValueError(f"Unsupported task: {task}" ) for name, value in fairseq_dict.items(): if should_ignore(_lowerCAmelCase , _lowerCAmelCase ): logger.info(f"{name} was ignored" ) continue A_ : List[Any] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Tuple = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: A_ , A_ : Optional[Any] = key.split(".*." ) if prefix in name and suffix in name: A_ : int = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: A_ : str = True if "*" in mapped_key: A_ : List[str] = name.split(_lowerCAmelCase )[0].split("." )[-2] A_ : Optional[int] = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: A_ : Union[str, Any] = "weight_g" elif "weight_v" in name: A_ : List[Any] = "weight_v" elif "bias" in name: A_ : Tuple = "bias" elif "weight" in name: A_ : List[Any] = "weight" elif "running_mean" in name: A_ : Union[str, Any] = "running_mean" elif "running_var" in name: A_ : Union[str, Any] = "running_var" elif "num_batches_tracked" in name: A_ : List[Any] = "num_batches_tracked" else: A_ : Optional[Any] = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ) -> List[Any]: A_ : int = full_name.split("conv_layers." 
)[-1] A_ : Optional[Any] = name.split("." ) A_ : List[Any] = int(items[0] ) A_ : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : Optional[int] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : Optional[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) A_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Union[str, Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : int=None , ) -> Optional[Any]: if config_path is not None: A_ : Dict = SpeechTaConfig.from_pretrained(_lowerCAmelCase ) else: A_ : Optional[int] = SpeechTaConfig() if task == "s2t": A_ : Optional[Any] = config.max_text_positions A_ : Optional[int] = SpeechTaForSpeechToText(_lowerCAmelCase ) elif task == "t2s": A_ : str = 1876 A_ : List[str] = 600 A_ : List[str] = config.max_speech_positions A_ : Tuple = SpeechTaForTextToSpeech(_lowerCAmelCase ) elif task == "s2s": A_ : Optional[int] = 1876 A_ : int = config.max_speech_positions A_ : Union[str, Any] = SpeechTaForSpeechToSpeech(_lowerCAmelCase ) else: raise ValueError(f"Unknown task name: {task}" ) if vocab_path: A_ : int = SpeechTaTokenizer(_lowerCAmelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it A_ : str = AddedToken("<mask>" , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) A_ : int = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) A_ : int = SpeechTaFeatureExtractor() A_ : Optional[Any] = SpeechTaProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) A_ : Union[str, Any] = torch.load(_lowerCAmelCase ) recursively_load_weights(fairseq_checkpoint["model"] , _lowerCAmelCase , _lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) if repo_id: print("Pushing to the hub..." 
) processor.push_to_hub(_lowerCAmelCase ) model.push_to_hub(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) _lowerCAmelCase : Tuple = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
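# Example invocation of the conversion script above (a sketch; the script
# filename, checkpoint path and SentencePiece path are placeholders, and only
# the arguments defined by the argparse block above are used):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted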
70
0
"""Fetch the authenticated GitHub user's profile via the GitHub REST API."""
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of a user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
70
'''simple docstring''' import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _snake_case ( a__ ): lowerCAmelCase :Dict = ['''image_processor''', '''tokenizer'''] lowerCAmelCase :Union[str, Any] = '''BlipImageProcessor''' lowerCAmelCase :Any = '''AutoTokenizer''' def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): super().__init__(_lowerCamelCase , _lowerCamelCase) # add QFormer tokenizer UpperCAmelCase__ : List[str] = qformer_tokenizer def __call__( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = None , **_lowerCamelCase , ): if images is None and text is None: raise ValueError("""You have to specify at least images or text.""") UpperCAmelCase__ : List[str] = BatchFeature() if text is not None: UpperCAmelCase__ : Any = self.tokenizer( text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) encoding.update(_lowerCamelCase) UpperCAmelCase__ : Optional[Any] = self.qformer_tokenizer( text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) UpperCAmelCase__ : Dict = qformer_text_encoding.pop("""input_ids""") UpperCAmelCase__ : Tuple = qformer_text_encoding.pop("""attention_mask""") if images is not None: UpperCAmelCase__ : List[str] = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase) encoding.update(_lowerCamelCase) return encoding def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase) def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = self.tokenizer.model_input_names UpperCAmelCase__ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) def snake_case__ ( self , _lowerCamelCase , 
**_lowerCamelCase): if os.path.isfile(_lowerCamelCase): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''') os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase) UpperCAmelCase__ : Dict = os.path.join(_lowerCamelCase , """qformer_tokenizer""") self.qformer_tokenizer.save_pretrained(_lowerCamelCase) return super().save_pretrained(_lowerCamelCase , **_lowerCamelCase) @classmethod def snake_case__ ( cls , _lowerCamelCase , **_lowerCamelCase): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_lowerCamelCase , subfolder="""qformer_tokenizer""") UpperCAmelCase__ : List[Any] = cls._get_arguments_from_pretrained(_lowerCamelCase , **_lowerCamelCase) args.append(_lowerCamelCase) return cls(*_lowerCamelCase)
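# A minimal usage sketch for the processor above, assuming the public
# "Salesforce/instructblip-flan-t5-xl" checkpoint; the image path is a
# placeholder. One call tokenizes the prompt twice (once for the language
# model, once for the Q-Former) and preprocesses the image.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open("example.jpg")  # placeholder image file
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# Expected keys: input_ids, attention_mask, qformer_input_ids,
# qformer_attention_mask, pixel_values
print(sorted(inputs.keys()))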
163
0
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int ) ->Optional[Any]: A__ : int = RobertaPreLayerNormConfig.from_pretrained( UpperCAmelCase__, architectures=["""RobertaPreLayerNormForMaskedLM"""] ) # convert state_dict A__ : Optional[Any] = torch.load(hf_hub_download(repo_id=UpperCAmelCase__, filename="""pytorch_model.bin""" ) ) A__ : int = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("""roberta.""" ): A__ : Optional[int] = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ): continue A__ : str = tensor_value A__ : Optional[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=UpperCAmelCase__, config=UpperCAmelCase__, state_dict=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) # convert tokenizer A__ : List[str] = AutoTokenizer.from_pretrained(UpperCAmelCase__ ) tokenizer.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
360
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''spiece.model'''} A_ = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', } } A_ = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } # Segments (not really needed) A_ = 0 A_ = 1 A_ = 2 A_ = 3 A_ = 4 class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = 'left' def __init__( self : Dict , snake_case : int , snake_case : List[Any]=False , snake_case : List[str]=True , snake_case : Dict=False , snake_case : Optional[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : Tuple="<unk>" , snake_case : Tuple="<sep>" , snake_case : Union[str, Any]="<pad>" , snake_case : Dict="<cls>" , snake_case : Optional[Any]="<mask>" , snake_case : Optional[int]=["<eop>", "<eod>"] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Dict , ): '''simple docstring''' A__ : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token A__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , ) A__ : str = 3 A__ : str = do_lower_case A__ : Optional[Any] = remove_space A__ : List[Any] = keep_accents A__ : Union[str, Any] = vocab_file A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case ) @property def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' return len(self.sp_model ) def _UpperCamelCase ( self : List[Any] ): '''simple docstring''' A__ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ): '''simple docstring''' A__ : int = self.__dict__.copy() A__ : int = None return state def __setstate__( self : Tuple , snake_case : Union[str, Any] ): '''simple docstring''' A__ : int = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ : Optional[int] = {} A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ): '''simple docstring''' if self.remove_space: A__ : Optional[Any] = """ """.join(inputs.strip().split() ) else: A__ : Dict = inputs A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: A__ : Any = unicodedata.normalize("""NFKD""" , snake_case ) A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] ) if self.do_lower_case: A__ : Any = outputs.lower() return outputs def _UpperCamelCase ( self : Union[str, Any] , 
snake_case : str ): '''simple docstring''' A__ : Dict = self.preprocess_text(snake_case ) A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case ) A__ : Optional[int] = [] for piece in pieces: if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A__ : int = cur_pieces[1:] else: A__ : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(snake_case ) else: new_pieces.append(snake_case ) return new_pieces def _UpperCamelCase ( self : List[str] , snake_case : Tuple ): '''simple docstring''' return self.sp_model.PieceToId(snake_case ) def _UpperCamelCase ( self : List[str] , snake_case : Any ): '''simple docstring''' return self.sp_model.IdToPiece(snake_case ) def _UpperCamelCase ( self : Optional[int] , snake_case : Any ): '''simple docstring''' A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip() return out_string def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ): '''simple docstring''' A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case ) A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 A__ : Any = [] A__ : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(snake_case ) ) A__ : str = [] sub_texts.append(snake_case ) else: current_sub_text.append(snake_case ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(snake_case ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens A__ : Dict = """""".join(snake_case ) A__ : int = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A__ : Tuple = self.clean_up_tokenization(snake_case ) return clean_text else: return text def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ): '''simple docstring''' A__ : Tuple = [self.sep_token_id] A__ : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) if token_ids_a is not None: return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1] return ([0] * len(snake_case )) + [1, 1] def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ): '''simple docstring''' A__ : Any = [self.sep_token_id] A__ : int = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * 
[1] + cls_segment_id def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A__ : List[Any] = os.path.join( snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case ) elif not os.path.isfile(self.vocab_file ): with open(snake_case , """wb""" ) as fi: A__ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,)
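# A short sketch of the tokenizer above, assuming the public
# "xlnet-base-cased" checkpoint. Per build_inputs_with_special_tokens above,
# XLNet appends <sep> and <cls> at the *end* of the sequence (and its
# padding side is "left").
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
encoding = tokenizer("Hello world")
# The last two ids are the sep and cls token ids.
assert encoding["input_ids"][-2:] == [tokenizer.sep_token_id, tokenizer.cls_token_id]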
296
0
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": _lowercase = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') _lowercase = F"""https://www.google.com/search?q={query}&num=100""" _lowercase = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: _lowercase = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: _lowercase = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
74
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
337
0
"""
Project Euler Problem 77 (https://projecteuler.net/problem=77): find the first
value that can be written as the sum of primes in over five thousand ways.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products of primes over all prime partitions of n."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest n with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
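# A quick sanity check for partition() above: each multiset of primes summing
# to n yields a distinct product (unique factorization), so the size of the
# returned set counts the prime partitions of n. For n = 10 there are five:
# 7+3, 5+5, 5+3+2, 3+3+2+2 and 2+2+2+2+2.
assert len(partition(10)) == 5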
6
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : Any = logging.get_logger(__name__) a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/""" a_ : Any = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def a_ ( __snake_case : int ) -> Any: """simple docstring""" if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowerCamelCase_ =key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ ={} import re lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case ) elif re_encoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case ) elif re_encoder_block_proj_out.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ 
=re_decoder_block_conv_out.sub(__snake_case , __snake_case ) elif re_decoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case ) elif re_decoder_block_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case ) elif re_prior_cond_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case ) elif re_prior_cond_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case ) # keep original key else: lowerCamelCase_ =original_key lowerCamelCase_ =replace_key(__snake_case ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) lowerCamelCase_ =original_key lowerCamelCase_ =original_key lowerCamelCase_ =value return new_dict @torch.no_grad() def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case ) open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content ) lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]] lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case ) lowerCamelCase_ =JukeboxModel(__snake_case ) lowerCamelCase_ =[] lowerCamelCase_ ={} for i, dict_name 
in enumerate(__snake_case ): lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model'''] lowerCamelCase_ ={} for k in old_dic.keys(): if k.endswith('''.b''' ): lowerCamelCase_ =old_dic[k] elif k.endswith('''.w''' ): lowerCamelCase_ =old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowerCamelCase_ =old_dic[k] else: lowerCamelCase_ =old_dic[k] lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}''' lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case ) weight_dict.append(__snake_case ) lowerCamelCase_ =weight_dict.pop(0 ) model.vqvae.load_state_dict(__snake_case ) for i in range(len(__snake_case ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile: json.dump(__snake_case , __snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) return weight_dict if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) a_ : Optional[int] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
6
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
83
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCamelCase : str = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE : torch.nn.Module , SCREAMING_SNAKE_CASE : BnbQuantizationConfig , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" UpperCamelCase__ : Union[str, Any] = bnb_quantization_config.load_in_abit UpperCamelCase__ : List[Any] = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) UpperCamelCase__ : int = [] # custom device map if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: UpperCamelCase__ : int = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: UpperCamelCase__ : List[Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[str] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: UpperCamelCase__ : Union[str, Any] = [] UpperCamelCase__ : List[Any] = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(SCREAMING_SNAKE_CASE ) # compatibility with peft UpperCamelCase__ : Optional[Any] = load_in_abit UpperCamelCase__ : List[str] = load_in_abit UpperCamelCase__ : str = get_parameter_device(SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. 
''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) UpperCamelCase__ : Union[str, Any] = replace_with_bnb_layers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE ) # convert param to the right dtype UpperCamelCase__ : str = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: UpperCamelCase__ : Union[str, Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(SCREAMING_SNAKE_CASE ): param.to(SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F"The model device type is {model_device.type}. However, cuda is needed for quantization." '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " ) else: with init_empty_weights(): UpperCamelCase__ : str = replace_with_bnb_layers( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = get_quantized_model_device_map( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_memory=SCREAMING_SNAKE_CASE , no_split_module_classes=SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): UpperCamelCase__ : Dict = True UpperCamelCase__ : str = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE , offload_state_dict=SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(SCREAMING_SNAKE_CASE , device_map=SCREAMING_SNAKE_CASE , offload_dir=SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : str=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): UpperCamelCase__ : int = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. 
A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) UpperCamelCase__ : str = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) UpperCamelCase__ : Optional[Any] = {} UpperCamelCase__ : Union[str, Any] = special_dtypes UpperCamelCase__ : Optional[int] = no_split_module_classes UpperCamelCase__ : int = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": UpperCamelCase__ : Dict = get_balanced_memory( SCREAMING_SNAKE_CASE , low_zero=(device_map == '''balanced_low_0''') , max_memory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) UpperCamelCase__ : Tuple = max_memory UpperCamelCase__ : Dict = infer_auto_device_map(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu UpperCamelCase__ : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules UpperCamelCase__ : Dict = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : int=None ): """simple docstring""" if modules_to_not_convert is None: UpperCamelCase__ : Optional[Any] = [] UpperCamelCase__ , UpperCamelCase__ : Dict = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , ): """simple docstring""" UpperCamelCase__ : str = False for name, module in model.named_children(): if current_key_name is None: UpperCamelCase__ : Tuple = [] current_key_name.append(SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` UpperCamelCase__ : int = '''.'''.join(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: UpperCamelCase__ : List[str] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: UpperCamelCase__ : int = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: UpperCamelCase__ : Optional[int] = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) UpperCamelCase__ : List[Any] = module.weight.data if module.bias is not None: UpperCamelCase__ : List[str] = module.bias.data bnb_module.requires_grad_(SCREAMING_SNAKE_CASE ) setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = True if len(list(module.children() ) ) > 0: UpperCamelCase__ , UpperCamelCase__ : Tuple = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" with init_empty_weights(): UpperCamelCase__ : Dict = deepcopy(SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` UpperCamelCase__ : str = find_tied_parameters(SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase__ : List[str] = sum(list(tied_params.values() ) 
, [] ) + list(tied_params.keys() ) else: UpperCamelCase__ : int = sum(SCREAMING_SNAKE_CASE , [] ) UpperCamelCase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model UpperCamelCase__ : str = False if hasattr(SCREAMING_SNAKE_CASE , '''base_model_prefix''' ): UpperCamelCase__ : int = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCamelCase__ : Tuple = list(model.named_children() ) UpperCamelCase__ : str = [list_modules[-1][0]] # add last module together with tied weights UpperCamelCase__ : Dict = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys UpperCamelCase__ : int = ['''.weight''', '''.bias'''] UpperCamelCase__ : Optional[int] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCamelCase__ : int = name.replace(SCREAMING_SNAKE_CASE , '''''' ) filtered_module_names.append(SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" for m in model.modules(): if isinstance(SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( SCREAMING_SNAKE_CASE : nn.Module ): """simple docstring""" return next(parameter.parameters() ).device def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , dtype=SCREAMING_SNAKE_CASE , value=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = param_name UpperCamelCase__ : str = model if "." in tensor_name: UpperCamelCase__ : List[Any] = tensor_name.split('''.''' ) for split in splits[:-1]: UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(F"{module} has no attribute {split}." ) UpperCamelCase__ : Optional[int] = new_module UpperCamelCase__ : List[str] = splits[-1] # offload weights UpperCamelCase__ : Any = False offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , ) else: offload_weight(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE ) offload_weight(SCREAMING_SNAKE_CASE , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''meta''' , dtype=SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
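# A minimal sketch of driving these helpers through accelerate's public API
# (the model name is just an example, a CUDA GPU is assumed, and
# "path/to/weights" is a placeholder for a folder holding the checkpoint):
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-350m")
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)  # no weights allocated yet

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/weights",  # placeholder checkpoint folder
    device_map="auto",
)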
146
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class lowerCamelCase_( A__ ): '''simple docstring''' lowercase__ : Union[str, Any] = '''bloom''' lowercase__ : Union[str, Any] = ['''past_key_values'''] lowercase__ : List[str] = { '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self , lowerCamelCase__=2_5_0_8_8_0 , lowerCamelCase__=6_4 , lowerCamelCase__=2 , lowerCamelCase__=8 , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0_2 , lowerCamelCase__=True , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=False , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=1 , lowerCamelCase__=False , **lowerCamelCase__ , ): _lowerCamelCase = vocab_size # Backward compatibility with n_embed kwarg _lowerCamelCase = kwargs.pop('''n_embed''' , _snake_case ) _lowerCamelCase = hidden_size if n_embed is None else n_embed _lowerCamelCase = n_layer _lowerCamelCase = n_head _lowerCamelCase = layer_norm_epsilon _lowerCamelCase = initializer_range _lowerCamelCase = use_cache _lowerCamelCase = pretraining_tp _lowerCamelCase = apply_residual_connection_post_layernorm _lowerCamelCase = hidden_dropout _lowerCamelCase = attention_dropout _lowerCamelCase = bos_token_id _lowerCamelCase = eos_token_id _lowerCamelCase = slow_but_exact super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) class lowerCamelCase_( A__ ): '''simple docstring''' lowercase__ : Tuple = version.parse('1.12' ) def __init__( self , lowerCamelCase__ , lowerCamelCase__ = "default" , lowerCamelCase__ = None , lowerCamelCase__ = False , ): super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case ) if not getattr(self._config , '''pad_token_id''' , _snake_case ): # TODO: how to do that better? _lowerCamelCase = 0 @property def snake_case__ ( self ): _lowerCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_snake_case , direction='''inputs''' , inverted_values_shape=_snake_case ) _lowerCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def snake_case__ ( self ): return self._config.n_layer @property def snake_case__ ( self ): return self._config.n_head @property def snake_case__ ( self ): return 1e-3 def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , ): _lowerCamelCase = super(_snake_case , self ).generate_dummy_inputs( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case ) # We need to order the input in the way they appears in the forward() _lowerCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _lowerCamelCase , _lowerCamelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _lowerCamelCase = seqlen + 2 _lowerCamelCase = self._config.hidden_size // self.num_attention_heads _lowerCamelCase = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) _lowerCamelCase = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) _lowerCamelCase = [ (torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(self.num_layers ) ] _lowerCamelCase = common_inputs['''attention_mask'''] if self.use_past: _lowerCamelCase = ordered_inputs['''attention_mask'''].dtype _lowerCamelCase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 ) return ordered_inputs @property def snake_case__ ( self ): return 1_3
357
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Dict = { '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''', } class lowerCamelCase_( A__ ): '''simple docstring''' lowercase__ : List[str] = 'mgp-str' def __init__( self , lowerCamelCase__=[3_2, 1_2_8] , lowerCamelCase__=4 , lowerCamelCase__=3 , lowerCamelCase__=2_7 , lowerCamelCase__=3_8 , lowerCamelCase__=5_0_2_5_7 , lowerCamelCase__=3_0_5_2_2 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=4.0 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=False , lowerCamelCase__=0.0_2 , **lowerCamelCase__ , ): super().__init__(**lowerCamelCase__ ) _lowerCamelCase = image_size _lowerCamelCase = patch_size _lowerCamelCase = num_channels _lowerCamelCase = max_token_length _lowerCamelCase = num_character_labels _lowerCamelCase = num_bpe_labels _lowerCamelCase = num_wordpiece_labels _lowerCamelCase = hidden_size _lowerCamelCase = num_hidden_layers _lowerCamelCase = num_attention_heads _lowerCamelCase = mlp_ratio _lowerCamelCase = distilled _lowerCamelCase = layer_norm_eps _lowerCamelCase = drop_rate _lowerCamelCase = qkv_bias _lowerCamelCase = attn_drop_rate _lowerCamelCase = drop_path_rate _lowerCamelCase = output_aa_attentions _lowerCamelCase = initializer_range
73
0
"""Dynamic-programming solutions to the rod-cutting problem."""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive search over all ways to cut a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic-programming solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic-programming solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
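# A small worked example for the solvers above, using the classic CLRS price
# table [1, 5, 8, 9]: a rod of length 4 is best cut into 2 + 2 for a revenue
# of 5 + 5 = 10, and the implementations agree.
assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10
assert top_down_cut_rod(4, [1, 5, 8, 9]) == 10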
58
'''simple docstring''' import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : List[Any] ) -> Union[str, Any]: _lowerCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__snake_case , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(__snake_case , """num_attention_heads""" ) ) self.parent.assertTrue(hasattr(__snake_case , """num_encoder_blocks""" ) ) class UpperCAmelCase : def __init__( self : Optional[int] , __snake_case : str , __snake_case : Dict=13 , __snake_case : str=64 , __snake_case : Dict=3 , __snake_case : Dict=4 , __snake_case : Tuple=[2, 2, 2, 2] , __snake_case : int=[8, 4, 2, 1] , __snake_case : List[str]=[16, 32, 64, 1_28] , __snake_case : Optional[Any]=[1, 4, 8, 16] , __snake_case : Dict=[1, 2, 4, 8] , __snake_case : Optional[Any]=True , __snake_case : List[str]=True , __snake_case : int="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : Any=0.1 , __snake_case : Tuple=0.02 , __snake_case : Union[str, Any]=3 , __snake_case : Tuple=None , ) -> List[str]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_encoder_blocks _lowerCAmelCase = sr_ratios _lowerCAmelCase = depths _lowerCAmelCase = hidden_sizes _lowerCAmelCase = downsampling_rates _lowerCAmelCase = num_attention_heads _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = scope def lowercase__ ( self : int ) -> Union[str, Any]: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def lowercase__ ( self : List[Any] ) -> List[str]: return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowercase__ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> Tuple: _lowerCAmelCase = SegformerModel(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = _lowerCAmelCase = self.image_size // 
(self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def lowercase__ ( self : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> List[str]: _lowerCAmelCase = self.num_labels _lowerCAmelCase = SegformerForSemanticSegmentation(__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _lowerCAmelCase = model(__snake_case , labels=__snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def lowercase__ ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ) -> List[str]: _lowerCAmelCase = 1 _lowerCAmelCase = SegformerForSemanticSegmentation(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__snake_case ) _lowerCAmelCase = model(__snake_case , labels=__snake_case ) self.parent.assertGreater(result.loss , 0.0 ) def lowercase__ ( self : Optional[int] ) -> int: _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Any = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) _lowercase: Tuple = ( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase: Tuple = True _lowercase: Union[str, Any] = False _lowercase: Dict = False _lowercase: Optional[Any] = False def lowercase__ ( self : Tuple ) -> Any: _lowerCAmelCase = SegformerModelTester(self ) _lowerCAmelCase = SegformerConfigTester(self , config_class=__snake_case ) def lowercase__ ( self : Optional[Any] ) -> Dict: self.config_tester.run_common_tests() def lowercase__ ( self : int ) -> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def lowercase__ ( self : Dict ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*__snake_case ) def lowercase__ ( self : Dict ) -> Dict: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*__snake_case ) @unittest.skip("""SegFormer does not use inputs_embeds""" ) def lowercase__ ( self : int ) -> Union[str, Any]: pass @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" ) def lowercase__ ( self : Optional[int] ) -> int: pass def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => 
so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) def lowercase__ ( self : Tuple ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = True for model_class in self.all_model_classes: _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.attentions _lowerCAmelCase = sum(self.model_tester.depths ) self.assertEqual(len(__snake_case ) , __snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(__snake_case ) , __snake_case ) # verify the first attentions (first block, first layer) _lowerCAmelCase = (self.model_tester.image_size // 4) ** 2 _lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _lowerCAmelCase = (self.model_tester.image_size // 32) ** 2 _lowerCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _lowerCAmelCase = len(__snake_case ) # Check attention is always last and order is fine _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) self.assertEqual(out_len + 1 , len(__snake_case ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(__snake_case ) , __snake_case ) # verify the first attentions (first block, first layer) _lowerCAmelCase = (self.model_tester.image_size // 4) ** 2 _lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowercase__ ( self : int ) -> List[str]: def check_hidden_states_output(__snake_case : str , __snake_case : Tuple , __snake_case : Optional[int] ): _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.hidden_states _lowerCAmelCase = self.model_tester.num_encoder_blocks self.assertEqual(len(__snake_case ) , __snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
_lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def lowercase__ ( self : Optional[Any] ) -> Any: if not self.model_tester.is_training: return _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(__snake_case ): continue _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.train() _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) _lowerCAmelCase = model(**__snake_case ).loss loss.backward() @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase__ ( self : Tuple ) -> Dict: pass @slow def lowercase__ ( self : str ) -> Optional[int]: for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = SegformerModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def lowercase__ ( self : Union[str, Any] ) -> Any: # only resize + normalize _lowerCAmelCase = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case ) _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __snake_case ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" ) _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case ) with torch.no_grad(): _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , __snake_case ) _lowerCAmelCase = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) ) @slow def lowercase__ ( self : Optional[Any] ) -> Any: # only resize + normalize _lowerCAmelCase = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case ) _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained( """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__snake_case ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" ) _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case ) with torch.no_grad(): _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , __snake_case ) _lowerCAmelCase = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, 
-14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-1 ) ) @slow def lowercase__ ( self : Any ) -> str: # only resize + normalize _lowerCAmelCase = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case ) _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __snake_case ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" ) _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case ) with torch.no_grad(): _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = outputs.logits.detach().cpu() _lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(5_00, 3_00)] ) _lowerCAmelCase = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , __snake_case ) _lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case ) _lowerCAmelCase = torch.Size((1_28, 1_28) ) self.assertEqual(segmentation[0].shape , __snake_case )
70
0
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048xHxW -> pooled -> flattened -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
352
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[Any] = { "b0": efficientnet.EfficientNetBa, "b1": efficientnet.EfficientNetBa, "b2": efficientnet.EfficientNetBa, "b3": efficientnet.EfficientNetBa, "b4": efficientnet.EfficientNetBa, "b5": efficientnet.EfficientNetBa, "b6": efficientnet.EfficientNetBa, "b7": efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE : Tuple = { "b0": { "hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": [], }, "b1": { "hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16], }, "b2": { "hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16], }, "b3": { "hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18], }, "b4": { "hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6], }, "b5": { "hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27], }, "b6": { "hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31], }, "b7": { "hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18], }, } def UpperCamelCase_( lowerCamelCase_ ) -> List[str]: _lowercase : Union[str, Any] = EfficientNetConfig() _lowercase : Any = CONFIG_MAP[model_name]['hidden_dim'] _lowercase : Any = CONFIG_MAP[model_name]['width_coef'] _lowercase : Optional[int] = CONFIG_MAP[model_name]['depth_coef'] _lowercase : List[Any] = CONFIG_MAP[model_name]['image_size'] _lowercase : Tuple = CONFIG_MAP[model_name]['dropout_rate'] _lowercase : Dict = CONFIG_MAP[model_name]['dw_padding'] _lowercase : str = 'huggingface/label-files' _lowercase : Optional[Any] = 'imagenet-1k-id2label.json' _lowercase : List[Any] = 1000 _lowercase : str = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) ) _lowercase : Optional[int] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} _lowercase : int = idalabel _lowercase : Optional[Any] = {v: k for k, v in idalabel.items()} return config def UpperCamelCase_( ) -> List[Any]: _lowercase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowercase : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) return im def UpperCamelCase_( lowerCamelCase_ ) -> Dict: _lowercase : Tuple = CONFIG_MAP[model_name]['image_size'] _lowercase : List[str] = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowerCamelCase_ , ) return preprocessor def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]: _lowercase : Tuple = [v.split('_' )[0].split('block' )[1] for v in original_param_names if 
v.startswith('block' )] _lowercase : Tuple = sorted(set(lowerCamelCase_ ) ) _lowercase : List[Any] = len(lowerCamelCase_ ) _lowercase : List[str] = {b: str(lowerCamelCase_ ) for b, i in zip(lowerCamelCase_ , range(lowerCamelCase_ ) )} _lowercase : Optional[int] = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') ) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') ) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') ) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') ) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') ) for b in block_names: _lowercase : Union[str, Any] = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') ) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') ) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') ) rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') ) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') ) _lowercase : Optional[Any] = {} for item in rename_keys: if item[0] in original_param_names: _lowercase : str = 'efficientnet.' 
+ item[1] _lowercase : Optional[Any] = 'classifier.weight' _lowercase : List[str] = 'classifier.bias' return key_mapping def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: for key, value in tf_params.items(): if "normalization" in key: continue _lowercase : Any = key_mapping[key] if "_conv" in key and "kernel" in key: _lowercase : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: _lowercase : Dict = torch.from_numpy(lowerCamelCase_ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: _lowercase : Tuple = torch.from_numpy(np.transpose(lowerCamelCase_ ) ) else: _lowercase : List[str] = torch.from_numpy(lowerCamelCase_ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowerCamelCase_ ) @torch.no_grad() def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int: _lowercase : Any = model_classes[model_name]( include_top=lowerCamelCase_ , weights='imagenet' , input_tensor=lowerCamelCase_ , input_shape=lowerCamelCase_ , pooling=lowerCamelCase_ , classes=1000 , classifier_activation='softmax' , ) _lowercase : int = original_model.trainable_variables _lowercase : Dict = original_model.non_trainable_variables _lowercase : Optional[Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _lowercase : int = param.numpy() _lowercase : int = list(tf_params.keys() ) # Load HuggingFace model _lowercase : int = get_efficientnet_config(lowerCamelCase_ ) _lowercase : List[str] = EfficientNetForImageClassification(lowerCamelCase_ ).eval() _lowercase : str = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('Converting parameters...' ) _lowercase : Optional[int] = rename_keys(lowerCamelCase_ ) replace_params(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Initialize preprocessor and preprocess input image _lowercase : Optional[Any] = convert_image_processor(lowerCamelCase_ ) _lowercase : Optional[Any] = preprocessor(images=prepare_img() , return_tensors='pt' ) # HF model inference hf_model.eval() with torch.no_grad(): _lowercase : Any = hf_model(**lowerCamelCase_ ) _lowercase : Optional[int] = outputs.logits.detach().numpy() # Original model inference _lowercase : List[Any] = False _lowercase : List[Any] = CONFIG_MAP[model_name]['image_size'] _lowercase : int = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) _lowercase : Optional[Any] = image.img_to_array(lowerCamelCase_ ) _lowercase : Any = np.expand_dims(lowerCamelCase_ , axis=0 ) _lowercase : Optional[int] = original_model.predict(lowerCamelCase_ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ), "The predicted logits are not the same." print('Model outputs match!' 
) if save_model: # Create folder to save model if not os.path.isdir(lowerCamelCase_ ): os.mkdir(lowerCamelCase_ ) # Save converted model and image processor hf_model.save_pretrained(lowerCamelCase_ ) preprocessor.save_pretrained(lowerCamelCase_ ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) _lowercase : str = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowerCamelCase_ ) hf_model.push_to_hub(lowerCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="b0", type=str, help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].", ) parser.add_argument( "--pytorch_dump_folder_path", default="hf_model", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") SCREAMING_SNAKE_CASE : str = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
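# Example invocation; the script filename is an assumption, while the flags come
# straight from the argparse block above:
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model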
84
0
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase_ : Tuple = logging.getLogger(__name__) def _A (__a , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = False , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE_ : List[Any] = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] # custom device map if isinstance(__a , __a ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE_ : int = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE_ : Optional[int] = get_keys_to_not_convert(__a ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__a ) SCREAMING_SNAKE_CASE_ : int = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE_ : List[str] = [] SCREAMING_SNAKE_CASE_ : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__a ) # compatibility with peft SCREAMING_SNAKE_CASE_ : int = load_in_abit SCREAMING_SNAKE_CASE_ : Any = load_in_abit SCREAMING_SNAKE_CASE_ : Any = get_parameter_device(__a ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. 
''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = replace_with_bnb_layers(__a , __a , modules_to_not_convert=__a ) # convert param to the right dtype SCREAMING_SNAKE_CASE_ : int = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE_ : List[Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(__a , __a , __a ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__a ): param.to(__a ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( f'The model device type is {model_device.type}. However, cuda is needed for quantization.' '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE_ : Optional[int] = replace_with_bnb_layers( __a , __a , modules_to_not_convert=__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = get_quantized_model_device_map( __a , __a , __a , max_memory=__a , no_split_module_classes=__a , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : List[str] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( __a , __a , __a , dtype=bnb_quantization_config.torch_dtype , offload_folder=__a , offload_state_dict=__a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__a , device_map=__a , offload_dir=__a ) def _A (__a , __a , __a=None , __a=None , __a=None ) -> Union[str, Any]: """simple docstring""" if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ : List[Any] = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. 
A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(__a , __a ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE_ : int = {} SCREAMING_SNAKE_CASE_ : List[Any] = special_dtypes SCREAMING_SNAKE_CASE_ : Union[str, Any] = no_split_module_classes SCREAMING_SNAKE_CASE_ : List[Any] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_balanced_memory( __a , low_zero=(device_map == '''balanced_low_0''') , max_memory=__a , **__a , ) SCREAMING_SNAKE_CASE_ : List[str] = max_memory SCREAMING_SNAKE_CASE_ : List[Any] = infer_auto_device_map(__a , **__a ) if isinstance(__a , __a ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE_ : int = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE_ : Dict = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _A (__a , __a , __a=None , __a=None ) -> str: """simple docstring""" if modules_to_not_convert is None: SCREAMING_SNAKE_CASE_ : List[str] = [] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _replace_with_bnb_layers( __a , __a , __a , __a ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _A (__a , __a , __a=None , __a=None , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE_ : Dict = [] current_key_name.append(__a ) if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE_ : int = '''.'''.join(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE_ : int = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ : Dict = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__a , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE_ : int = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) SCREAMING_SNAKE_CASE_ : int = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE_ : List[Any] = module.bias.data bnb_module.requires_grad_(__a ) setattr(__a , __a , __a ) SCREAMING_SNAKE_CASE_ : Optional[int] = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = _replace_with_bnb_layers( __a , __a , __a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _A (__a ) -> Any: """simple docstring""" with init_empty_weights(): SCREAMING_SNAKE_CASE_ : Tuple = deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE_ : Any = find_tied_parameters(__a ) # For compatibility with Accelerate < 0.18 if isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: SCREAMING_SNAKE_CASE_ : List[str] = sum(__a , [] ) SCREAMING_SNAKE_CASE_ : Dict = len(__a ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE_ : Tuple = False if hasattr(__a , '''base_model_prefix''' ): SCREAMING_SNAKE_CASE_ : int = not hasattr(__a , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE_ : str = list(model.named_children() ) SCREAMING_SNAKE_CASE_ : List[str] = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE_ : Any = set(__a ) - set(__a ) SCREAMING_SNAKE_CASE_ : Tuple = list(set(__a ) ) + list(__a ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE_ : int = ['''.weight''', '''.bias'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE_ : Union[str, Any] = name.replace(__a , '''''' ) filtered_module_names.append(__a ) return filtered_module_names def _A (__a ) -> Any: """simple docstring""" for m in model.modules(): if isinstance(__a , bnb.nn.Linearabit ): return True return False def _A (__a ) -> Union[str, Any]: """simple docstring""" return next(parameter.parameters() ).device def _A (__a , __a , __a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(__a , __a , 0 , dtype=__a , value=__a ) SCREAMING_SNAKE_CASE_ : List[Any] = param_name SCREAMING_SNAKE_CASE_ : List[str] = model if "." in tensor_name: SCREAMING_SNAKE_CASE_ : List[str] = tensor_name.split('''.''' ) for split in splits[:-1]: SCREAMING_SNAKE_CASE_ : str = getattr(__a , __a ) if new_module is None: raise ValueError(f'{module} has no attribute {split}.' ) SCREAMING_SNAKE_CASE_ : List[Any] = new_module SCREAMING_SNAKE_CASE_ : Optional[Any] = splits[-1] # offload weights SCREAMING_SNAKE_CASE_ : Optional[Any] = False offload_weight(module._parameters[tensor_name] , __a , __a , index=__a ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __a , index=__a , ) else: offload_weight(__a , __a , __a , index=__a ) offload_weight(__a , param_name.replace('''weight''' , '''SCB''' ) , __a , index=__a ) set_module_tensor_to_device(__a , __a , '''meta''' , dtype=__a , value=torch.empty(*param.size() ) )
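# A hedged sketch of how the quantization entry point above is usually driven through
# accelerate's public API; the weights location is a placeholder and a GPU plus
# `bitsandbytes` are required at runtime.
if __name__ == "__main__":
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

    bnb_config = BnbQuantizationConfig(load_in_8bit=True)
    with init_empty_weights():
        empty_model = nn.Sequential(nn.Linear(1024, 1024), nn.Linear(1024, 2))
    quantized = load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="path/to/weights",  # placeholder
    )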
91
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' try: AutoConfig.register("""custom""" ,lowerCamelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""model""" ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""bert""" ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "new-model" try: AutoConfig.register("""new-model""" ,lowerCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
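# Condensed from the registration test above: the minimal steps for plugging a custom
# config class into the auto-API (the "my-custom" model type is illustrative).
if __name__ == "__main__":
    from transformers import AutoConfig, PretrainedConfig

    class MyCustomConfig(PretrainedConfig):
        model_type = "my-custom"

    AutoConfig.register("my-custom", MyCustomConfig)
    config = AutoConfig.for_model("my-custom")
    print(type(config).__name__)  # MyCustomConfig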
296
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __UpperCamelCase : List[Any] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = ['''SpeechEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''FlaxSpeechEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
356
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter __UpperCamelCase : Optional[Any] = '''Create a default config file for Accelerate with only a few flags set.''' def __SCREAMING_SNAKE_CASE ( A_="no" , A_ = default_json_config_file , A_ = False ): lowerCAmelCase__ : List[Any] = Path(A_ ) path.parent.mkdir(parents=A_ , exist_ok=A_ ) if path.exists(): print( f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' ) return False lowerCAmelCase__ : Optional[int] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' ) lowerCAmelCase__ : Optional[Any] = { '''compute_environment''': '''LOCAL_MACHINE''', '''mixed_precision''': mixed_precision, } if torch.cuda.is_available(): lowerCAmelCase__ : Union[str, Any] = torch.cuda.device_count() lowerCAmelCase__ : Tuple = num_gpus lowerCAmelCase__ : List[str] = False if num_gpus > 1: lowerCAmelCase__ : Any = '''MULTI_GPU''' else: lowerCAmelCase__ : Union[str, Any] = '''NO''' elif is_xpu_available() and use_xpu: lowerCAmelCase__ : Optional[Any] = torch.xpu.device_count() lowerCAmelCase__ : Tuple = num_xpus lowerCAmelCase__ : List[str] = False if num_xpus > 1: lowerCAmelCase__ : Union[str, Any] = '''MULTI_XPU''' else: lowerCAmelCase__ : List[Any] = '''NO''' elif is_npu_available(): lowerCAmelCase__ : Optional[int] = torch.npu.device_count() lowerCAmelCase__ : List[Any] = num_npus lowerCAmelCase__ : Optional[int] = False if num_npus > 1: lowerCAmelCase__ : Any = '''MULTI_NPU''' else: lowerCAmelCase__ : int = '''NO''' else: lowerCAmelCase__ : List[Any] = 0 lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Tuple = 1 lowerCAmelCase__ : Optional[Any] = '''NO''' lowerCAmelCase__ : Optional[Any] = ClusterConfig(**A_ ) config.to_json_file(A_ ) return path def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Any = parser.add_parser('''default''' , parents=A_ , help=A_ , formatter_class=A_ ) parser.add_argument( '''--config_file''' , default=A_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=A_ , help='''Whether or not to use mixed precision training. 
''' '''Choose between FP16 and BF16 (bfloat16) training. ''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=A_ ) return parser def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : List[str] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f'accelerate configuration saved at {config_file}' )
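# A minimal sketch of the programmatic entry point above; the save path is an
# illustrative temp location.
if __name__ == "__main__":
    path = write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_default_config.json")
    if path:
        print(f"accelerate configuration saved at {path}")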
74
0
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the numbers below NUM_PRIMES.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products of all partitions of ``number_to_partition`` into primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first integer that has more than ``number_unique_partitions`` prime partitions."""
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
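# Sanity check borrowed from the Project Euler 77 statement this file mirrors: ten can
# be written as a sum of primes in exactly five ways (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7,
# 5+5), and the five products (32, 36, 30, 21, 25) are distinct, so the set has size 5.
assert len(partition(10)) == 5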
6
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A( a , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __A( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ort.SessionOptions() __a = False return options def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
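# A hedged sketch of the pipeline exercised above, mirroring the checkpoint and
# "onnx" revision from the tests but using the CPU provider so no GPU is required.
if __name__ == "__main__":
    pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider"
    )
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/in_paint/overture-creations-5sI6fQgYIuo.png"
    )
    mask_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
    )
    out = pipe(
        prompt="A red cat sitting on a park bench", image=init_image, mask_image=mask_image, num_inference_steps=10
    )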
6
1
'''simple docstring'''

from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
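# Worked example matching the n=4, k=2 driver above: the backtracking recursion emits
# the combinations in lexicographic order.
assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]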
368
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase ) -> list[list]: '''simple docstring''' snake_case_ = current_set.copy() for row_index, row in enumerate(__UpperCAmelCase ): snake_case_ = row[0] for column_index, column in enumerate(__UpperCAmelCase ): if magnitude == 0: snake_case_ = column continue snake_case_ = column / magnitude # Subtract to cancel term snake_case_ = current_set[0] snake_case_ = [first_row] snake_case_ = current_set[1::] for row in current_set: snake_case_ = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCAmelCase ) continue for column_index in range(len(__UpperCAmelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCAmelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: snake_case_ = final_set[0] snake_case_ = [] snake_case_ = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) snake_case_ = simplify(__UpperCAmelCase ) for i in range(len(__UpperCAmelCase ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, __UpperCAmelCase ) snake_case_ = resultant return final_set def __magic_name__ ( __UpperCAmelCase ) -> list: '''simple docstring''' if len(__UpperCAmelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) snake_case_ = len(__UpperCAmelCase ) + 1 if any(len(__UpperCAmelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCAmelCase, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCAmelCase ) == 1: return [equations[0][-1] / equations[0][0]] snake_case_ = equations.copy() if any(0 in row for row in data_set ): snake_case_ = data_set.copy() snake_case_ = [] for row_index, row in enumerate(__UpperCAmelCase ): if 0 not in row: snake_case_ = data_set.pop(__UpperCAmelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, __UpperCAmelCase ) snake_case_ = data_set.copy() snake_case_ = simplify(__UpperCAmelCase ) snake_case_ = simplified[::-1] snake_case_ = [] for row in simplified: snake_case_ = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue snake_case_ = row.copy()[: len(__UpperCAmelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCAmelCase ) == 0: solutions.append(0 ) continue snake_case_ = temp_row[1::] snake_case_ = temp_row[::-1] for column_index, column in enumerate(__UpperCAmelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCAmelCase ) snake_case_ = [] for item in solutions: final.append(float(round(__UpperCAmelCase, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() a : str = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
0
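
The code column of the row above is a backtracking generator for all k-element combinations of 1..n, though the identifier mangling in the sample (duplicate parameter names, an undefined result variable) leaves it non-runnable as printed. The same routine with readable names restored; the descriptive identifiers are supplied here, not taken from the row.

from __future__ import annotations


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # Base case: a complete k-element combination has been built.
    if level == 0:
        total_list.append(current_list[:])
        return
    # Leave room for the `level` elements still to be chosen.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


if __name__ == "__main__":
    for combo in generate_all_combinations(4, 2):
        print(*combo)  # 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4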
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase__ ( lowerCAmelCase): SCREAMING_SNAKE_CASE__ = 42 class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase): @register_to_config def __init__(self , UpperCAmelCase = 3 , UpperCAmelCase = 3 , UpperCAmelCase = ("DownEncoderBlock2D",) , UpperCAmelCase = ("UpDecoderBlock2D",) , UpperCAmelCase = (6_4,) , UpperCAmelCase = 1 , UpperCAmelCase = "silu" , UpperCAmelCase = 3 , UpperCAmelCase = 3_2 , UpperCAmelCase = 2_5_6 , UpperCAmelCase = 3_2 , UpperCAmelCase = None , UpperCAmelCase = 0.1_8215 , UpperCAmelCase = "group" , ) -> Any: super().__init__() # pass init params to Encoder _lowercase =Encoder( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , down_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , double_z=UpperCAmelCase , ) _lowercase =vq_embed_dim if vq_embed_dim is not None else latent_channels _lowercase =nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 ) _lowercase =VectorQuantizer(UpperCAmelCase , UpperCAmelCase , beta=0.25 , remap=UpperCAmelCase , sane_index_shape=UpperCAmelCase ) _lowercase =nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 ) # pass init params to Decoder _lowercase =Decoder( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , up_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , norm_type=UpperCAmelCase , ) @apply_forward_hook def __A (self , UpperCAmelCase , UpperCAmelCase = True ) -> VQEncoderOutput: _lowercase =self.encoder(UpperCAmelCase ) _lowercase =self.quant_conv(UpperCAmelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=UpperCAmelCase ) @apply_forward_hook def __A (self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: _lowercase , _lowercase , _lowercase =self.quantize(UpperCAmelCase ) else: _lowercase =h _lowercase =self.post_quant_conv(UpperCAmelCase ) _lowercase =self.decoder(UpperCAmelCase , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase ) def __A (self , UpperCAmelCase , UpperCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: _lowercase =sample _lowercase =self.encode(UpperCAmelCase ).latents _lowercase =self.decode(UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase )
5
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int: __lowerCamelCase : Optional[int] = 0 __lowerCamelCase : Dict = len(lowerCamelCase__ ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None __lowerCamelCase : str = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(lowerCamelCase__ ): return None __lowerCamelCase : Tuple = sorted_collection[point] if current_item == item: return point else: if point < left: __lowerCamelCase : List[Any] = left __lowerCamelCase : Tuple = point elif point > right: __lowerCamelCase : Dict = right __lowerCamelCase : str = point else: if item < current_item: __lowerCamelCase : Dict = point - 1 else: __lowerCamelCase : Dict = point + 1 return None def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None __lowerCamelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(lowerCamelCase__ ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) elif point > right: return interpolation_search_by_recursion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , point - 1 ) else: return interpolation_search_by_recursion( lowerCamelCase__ , lowerCamelCase__ , point + 1 , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: if collection != sorted(lowerCamelCase__ ): raise ValueError('Collection must be ascending sorted' ) return True if __name__ == "__main__": import sys a =0 if debug == 1: a =[10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit("""Sequence must be ascending sorted to apply interpolation search""") a =67 a =interpolation_search(collection, target) if result is not None: print(F"""{target} found at positions: {result}""") else: print("""Not found""")
73
0
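
The style context of the row above is an interpolation search over a sorted list: instead of probing the midpoint as binary search does, it probes where the target would fall if the values were uniformly distributed. A readable rendering of the iterative variant, with names restored by hand.

def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # Avoid division by zero when the remaining values are all equal.
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        # Clamp the probe back into the window to guarantee progress.
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 66) == 5
assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 67) is None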
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # prepare kernel # the kernel size have to be odd if (ksize % 2) == 0: __lowerCamelCase : Tuple = ksize + 1 __lowerCamelCase : int = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(SCREAMING_SNAKE_CASE__ ): for x in range(SCREAMING_SNAKE_CASE__ ): # distance from center __lowerCamelCase : Optional[int] = x - ksize // 2 __lowerCamelCase : Union[str, Any] = y - ksize // 2 # degree to radiant __lowerCamelCase : List[str] = theta / 180 * np.pi __lowerCamelCase : Union[str, Any] = np.cos(_theta ) __lowerCamelCase : Any = np.sin(_theta ) # get kernel x __lowerCamelCase : List[Any] = cos_theta * px + sin_theta * py # get kernel y __lowerCamelCase : Optional[int] = -sin_theta * px + cos_theta * py # fill kernel __lowerCamelCase : Optional[Any] = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image lowercase_ = imread('../image_data/lena.jpg') # turn image in gray scale value lowercase_ = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges lowercase_ = np.zeros(gray.shape[:2]) for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]: lowercase_ = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) lowercase_ = out / out.max() * 2_5_5 lowercase_ = out.astype(np.uinta) imshow('Original', gray) imshow('Gabor filter with 20x20 mask and 6 directions', out) waitKey(0)
369
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : str = 0 __lowerCamelCase : Tuple = len(SCREAMING_SNAKE_CASE__ ) for i in range(n - 1 ): for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): if len(SCREAMING_SNAKE_CASE__ ) <= 1: return arr, 0 __lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) // 2 __lowerCamelCase : Union[str, Any] = arr[0:mid] __lowerCamelCase : List[Any] = arr[mid:] __lowerCamelCase , __lowerCamelCase : Any = count_inversions_recursive(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase : List[str] = count_inversions_recursive(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase : Dict = _count_cross_inversions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : List[str] = inversion_p + inversions_q + cross_inversions return c, num_inversions def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Optional[int] = [] __lowerCamelCase : List[Any] = 0 while i < len(SCREAMING_SNAKE_CASE__ ) and j < len(SCREAMING_SNAKE_CASE__ ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(SCREAMING_SNAKE_CASE__ ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(SCREAMING_SNAKE_CASE__ ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def UpperCamelCase__ ( ): __lowerCamelCase : Optional[int] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __lowerCamelCase : Optional[Any] = count_inversions_bf(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase : Optional[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE__ ) assert num_inversions_bf == num_inversions_recursive == 8 print('number of inversions = ' , SCREAMING_SNAKE_CASE__ ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __lowerCamelCase : Optional[Any] = count_inversions_bf(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase : int = count_inversions_recursive(SCREAMING_SNAKE_CASE__ ) assert num_inversions_bf == num_inversions_recursive == 0 print('number of inversions = ' , SCREAMING_SNAKE_CASE__ ) # an empty list should also have zero inversions __lowerCamelCase : List[str] = [] __lowerCamelCase : Dict = count_inversions_bf(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase : Dict = count_inversions_recursive(SCREAMING_SNAKE_CASE__ ) assert num_inversions_bf == num_inversions_recursive == 0 print('number of inversions = ' , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
194
0
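
The style context of the row above counts array inversions (pairs i < j with arr[i] > arr[j]) both by brute force and by divide and conquer. The O(n log n) merge-based half, with names restored.

def count_cross_inversions(p: list[int], q: list[int]) -> tuple[list[int], int]:
    r: list[int] = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # p is sorted, so every remaining p[k] also exceeds q[j].
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    r.extend(p[i:] if i < len(p) else q[j:])
    return r, num_inversion


def count_inversions(arr: list[int]) -> tuple[list[int], int]:
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p, inversions_p = count_inversions(arr[:mid])
    q, inversions_q = count_inversions(arr[mid:])
    merged, cross = count_cross_inversions(p, q)
    return merged, inversions_p + inversions_q + cross


# The sample's own test case: 8 inversions.
assert count_inversions([10, 2, 1, 5, 5, 2, 11])[1] == 8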
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , 
beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with 
tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
0
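
The code column of the row above (a LiLT package __init__) relies on transformers' _LazyModule so that importing the package does not pull in torch-heavy submodules until a symbol is actually used. A minimal standalone sketch of the same deferred-import idea using PEP 562's module-level __getattr__; this belongs in a package __init__.py, and the module and class names here are illustrative.

import importlib

_import_structure = {
    "configuration_lilt": ["LiltConfig"],
    "modeling_lilt": ["LiltModel"],
}
# Invert the mapping: exported name -> submodule that defines it.
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}


def __getattr__(name: str):
    # Called only when `name` is not already a module attribute,
    # so each submodule is imported on first access.
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    return sorted(_name_to_module)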
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" snake_case = args.log_outputs snake_case = '_'.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric snake_case = load_metric('''wer''' ) snake_case = load_metric('''cer''' ) # compute metrics snake_case = wer.compute(references=result['''target'''] ,predictions=result['''prediction'''] ) snake_case = cer.compute(references=result['''target'''] ,predictions=result['''prediction'''] ) # print & log results snake_case = F'''WER: {wer_result}\nCER: {cer_result}''' print(_UpperCAmelCase ) with open(F'''{dataset_id}_eval_results.txt''' ,'''w''' ) as f: f.write(_UpperCAmelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: snake_case = F'''log_{dataset_id}_predictions.txt''' snake_case = F'''log_{dataset_id}_targets.txt''' with open(_UpperCAmelCase ,'''w''' ) as p, open(_UpperCAmelCase ,'''w''' ) as t: # mapping function to write output def write_to_file(UpperCamelCase_ ,UpperCamelCase_ ): p.write(F'''{i}''' + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(F'''{i}''' + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(_UpperCAmelCase ,with_indices=_UpperCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training snake_case = re.sub(_UpperCAmelCase ,'''''' ,text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! snake_case = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: snake_case = ' '.join(text.split(_UpperCAmelCase ) ) return text def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" snake_case = load_dataset(args.dataset ,args.config ,split=args.split ,use_auth_token=_UpperCAmelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor snake_case = AutoFeatureExtractor.from_pretrained(args.model_id ) snake_case = feature_extractor.sampling_rate # resample audio snake_case = dataset.cast_column('''audio''' ,Audio(sampling_rate=_UpperCAmelCase ) ) # load eval pipeline if args.device is None: snake_case = 0 if torch.cuda.is_available() else -1 snake_case = pipeline('''automatic-speech-recognition''' ,model=args.model_id ,device=args.device ) # map function to decode audio def map_to_pred(UpperCamelCase_ ): snake_case = asr( batch['''audio''']['''array'''] ,chunk_length_s=args.chunk_length_s ,stride_length_s=args.stride_length_s ) snake_case = prediction['text'] snake_case = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples snake_case = dataset.map(_UpperCAmelCase ,remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_UpperCAmelCase ,_UpperCAmelCase ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) _SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() main(args)
351
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class A__ ( enum.Enum ): """simple docstring""" __magic_name__ = 0 __magic_name__ = 1 @add_end_docstrings(snake_case__ ) class A__ ( snake_case__ ): """simple docstring""" __magic_name__ = 'generated' def __init__( self , *__snake_case , **__snake_case ): super().__init__(*__snake_case , **__snake_case ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def a_ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case , ): snake_case = {} if truncation is not None: snake_case = truncation snake_case = generate_kwargs snake_case = {} if return_tensors is not None and return_type is None: snake_case = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: snake_case = return_type if clean_up_tokenization_spaces is not None: snake_case = clean_up_tokenization_spaces if stop_sequence is not None: snake_case = self.tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) if len(__snake_case ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) snake_case = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a_ ( self , __snake_case , __snake_case , __snake_case ): return True def a_ ( self , *__snake_case , __snake_case ): snake_case = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , __snake_case ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) snake_case = ([prefix + arg for arg in args[0]],) snake_case = True elif isinstance(args[0] , __snake_case ): snake_case = (prefix + args[0],) snake_case = False else: raise ValueError( F''' `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`''' ) snake_case = self.tokenizer(*__snake_case , padding=__snake_case , truncation=__snake_case , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self , *__snake_case , **__snake_case ): snake_case = super().__call__(*__snake_case , **__snake_case ) if ( isinstance(args[0] , __snake_case ) and all(isinstance(__snake_case , __snake_case ) for el in args[0] ) and all(len(__snake_case ) == 1 for res in result ) ): return [res[0] for res in result] return result def a_ ( self , __snake_case , __snake_case=TruncationStrategy.DO_NOT_TRUNCATE , **__snake_case ): snake_case = self._parse_and_tokenize(__snake_case , truncation=__snake_case , **__snake_case ) return inputs def a_ ( self , __snake_case , **__snake_case ): if self.framework == "pt": snake_case , snake_case = model_inputs['''input_ids'''].shape elif self.framework == "tf": snake_case , snake_case = tf.shape(model_inputs['''input_ids'''] ).numpy() snake_case = generate_kwargs.get('''min_length''' , self.model.config.min_length ) snake_case = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(__snake_case , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) snake_case = self.model.generate(**__snake_case , **__snake_case ) snake_case = output_ids.shape[0] if self.framework == "pt": snake_case = output_ids.reshape(__snake_case , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": snake_case = tf.reshape(__snake_case , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def a_ ( self , __snake_case , __snake_case=ReturnType.TEXT , __snake_case=False ): snake_case = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: snake_case = {F'''{self.return_name}_token_ids''': output_ids} elif return_type == ReturnType.TEXT: snake_case = { F'''{self.return_name}_text''': self.tokenizer.decode( __snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case , ) } records.append(__snake_case ) return records @add_end_docstrings(snake_case__ ) class A__ ( snake_case__ ): """simple docstring""" __magic_name__ = 'summary' def __call__( self , *__snake_case , **__snake_case ): return super().__call__(*__snake_case , **__snake_case ) def a_ ( self , __snake_case , __snake_case , __snake_case ): if max_length < min_length: logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' ) if input_length < max_length: logger.warning( F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ''' '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' ) @add_end_docstrings(snake_case__ ) class A__ ( snake_case__ ): """simple docstring""" __magic_name__ = 'translation' def a_ ( self , __snake_case , __snake_case , __snake_case ): if input_length > 0.9 * max_length: logger.warning( F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ''' '''increasing your max_length manually, e.g. 
translator(\'...\', max_length=400)''' ) return True def a_ ( self , *__snake_case , __snake_case=TruncationStrategy.DO_NOT_TRUNCATE , __snake_case=None , __snake_case=None ): if getattr(self.tokenizer , '''_build_translation_inputs''' , __snake_case ): return self.tokenizer._build_translation_inputs( *__snake_case , return_tensors=self.framework , truncation=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case ) else: return super()._parse_and_tokenize(*__snake_case , truncation=__snake_case ) def a_ ( self , __snake_case=None , __snake_case=None , **__snake_case ): snake_case , snake_case , snake_case = super()._sanitize_parameters(**__snake_case ) if src_lang is not None: snake_case = src_lang if tgt_lang is not None: snake_case = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. snake_case = kwargs.get('''task''' , self.task ) snake_case = task.split('''_''' ) if task and len(__snake_case ) == 4: # translation, XX, to YY snake_case = items[1] snake_case = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self , *__snake_case , **__snake_case ): return super().__call__(*__snake_case , **__snake_case )
213
0
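
The style context of the row above is the transformers implementation behind the text2text-generation, summarization, and translation pipelines; the min_length/max_length warnings in it fire during ordinary use of the high-level API. A typical usage sketch; the checkpoints named here are illustrative public ones, and the exact output text depends on model versions.

from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
summary = summarizer(
    "A long news article goes here ...",
    max_length=60,
    min_length=10,
)[0]["summary_text"]
print(summary)

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?")[0]["translation_text"])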
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING lowerCamelCase_ : List[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING def _lowercase ( self ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _lowercase ( self ) -> List[str]: lowerCamelCase : str = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" ) lowerCamelCase : Dict = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 3_8015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 2_5506, "token_str": " accuser"}, ] , ) lowerCamelCase : int = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 3_8015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 2_5506, "token_str": " accuser", }, ] , ) lowerCamelCase : List[str] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is Clara", "score": 2e-05, "token": 1_3606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"}, ] , ) @require_torch def _lowercase ( self ) -> List[str]: lowerCamelCase : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" ) lowerCamelCase : Tuple = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 3_5676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 1_6416, "token_str": "ELS"}, ] , ) lowerCamelCase : Union[str, Any] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 3_5676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 1_6416, "token_str": "ELS"}, ] , ) lowerCamelCase : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 1_3606, "token_str": " Clara"}, ] , ) lowerCamelCase : int = unmasker("My name is <mask> <mask>" , top_k=2 ) self.assertEqual( 
nested_simplify(UpperCamelCase__ , decimals=6 ) , [ [ { "score": 2.2e-05, "token": 3_5676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 3_5676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ] , ) @require_torch_gpu def _lowercase ( self ) -> Dict: lowerCamelCase : Any = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" ) # convert model to fp16 pipe.model.half() lowerCamelCase : Tuple = pipe("Paris is the [MASK] of France." ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) @slow @require_torch def _lowercase ( self ) -> List[Any]: lowerCamelCase : Tuple = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" ) self.run_large_test(UpperCamelCase__ ) @slow @require_tf def _lowercase ( self ) -> str: lowerCamelCase : Tuple = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" ) self.run_large_test(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : List[Any] = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ] , ) lowerCamelCase : List[Any] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 1_2790, "token_str": " Lyon", }, ] , ) lowerCamelCase : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 1_3606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ] , ) @require_torch def _lowercase ( self ) -> List[str]: lowerCamelCase : Union[str, Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" ) lowerCamelCase : List[str] = None lowerCamelCase : Dict = None self.run_pipeline_test(UpperCamelCase__ , [] ) @require_tf def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Optional[int] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" ) lowerCamelCase : int = None lowerCamelCase : List[Any] = None self.run_pipeline_test(UpperCamelCase__ , [] ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" ) lowerCamelCase : Union[str, Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = [ F'''This is another {tokenizer.mask_token} test''', 
] return fill_masker, examples def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : Tuple = fill_masker.tokenizer lowerCamelCase : Optional[Any] = fill_masker.model lowerCamelCase : Optional[int] = fill_masker( F'''This is a {tokenizer.mask_token}''' , ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : int = fill_masker([F'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : Any = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( UpperCamelCase__ , [ [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], ] , ) with self.assertRaises(UpperCamelCase__ ): fill_masker([None] ) # No mask_token is not 
supported with self.assertRaises(UpperCamelCase__ ): fill_masker("This is" ) self.run_test_top_k(UpperCamelCase__ , UpperCamelCase__ ) self.run_test_targets(UpperCamelCase__ , UpperCamelCase__ ) self.run_test_top_k_targets(UpperCamelCase__ , UpperCamelCase__ ) self.fill_mask_with_duplicate_targets_and_top_k(UpperCamelCase__ , UpperCamelCase__ ) self.fill_mask_with_multiple_masks(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any: lowerCamelCase : Dict = tokenizer.get_vocab() lowerCamelCase : Tuple = sorted(vocab.keys() )[:2] # Pipeline argument lowerCamelCase : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , targets=UpperCamelCase__ ) lowerCamelCase : List[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : List[str] = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , UpperCamelCase__ ) lowerCamelCase : Any = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(UpperCamelCase__ ) ) # Call argument lowerCamelCase : Tuple = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : List[str] = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , UpperCamelCase__ ) lowerCamelCase : List[str] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(UpperCamelCase__ ) ) # Score equivalence lowerCamelCase : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = [top_mask["token_str"] for top_mask in outputs] lowerCamelCase : Union[str, Any] = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(UpperCamelCase__ ) == set(UpperCamelCase__ ): lowerCamelCase : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ ) lowerCamelCase : List[str] = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) ) # Raises with invalid with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""] ) with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="" ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , top_k=2 ) lowerCamelCase : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : Union[str, Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : Tuple = tokenizer.get_vocab() lowerCamelCase : Any = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # top_k=2, ntargets=3 lowerCamelCase : Any = sorted(vocab.keys() )[:3] lowerCamelCase : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=UpperCamelCase__ ) # If we use the most probably targets, and filter differently, we should still # have the same results lowerCamelCase : List[Any] = [el["token_str"] for el in sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x["score"] , reverse=UpperCamelCase__ )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(UpperCamelCase__ ).issubset(UpperCamelCase__ ): lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=UpperCamelCase__ ) # They should yield exactly the same result self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str: lowerCamelCase : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : str = tokenizer.get_vocab() # String duplicates + id duplicates lowerCamelCase : Tuple = sorted(vocab.keys() )[:3] lowerCamelCase : List[str] = [targets[0], targets[1], targets[0], targets[2], targets[1]] lowerCamelCase : int = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=UpperCamelCase__ , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(UpperCamelCase__ ) , 3 ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: lowerCamelCase : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : Tuple = fill_masker( F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( UpperCamelCase__ , [ [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], ] , )
48
"""simple docstring""" from __future__ import annotations import math _lowercase = '''2020.9.26''' _lowercase = '''xcodz-dot, cclaus, dhruvmanila''' def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ): A = F'Input values must either be float or int: {list(locals().values() )}' raise TypeError(snake_case__ ) A = ((x * distance) / (z + distance)) * scale A = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ): if not isinstance(snake_case__ , snake_case__ ): raise TypeError('Axis must be a str' ) A = locals() del input_variables["axis"] if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ): A = ( 'Input values except axis must either be float or int: ' F'{list(input_variables.values() )}' ) raise TypeError(snake_case__ ) A = (angle % 360) / 450 * 180 / math.pi if axis == "z": A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = z elif axis == "x": A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) A = x elif axis == "y": A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = y else: raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""") print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
74
0
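
The style context of the row above projects 3D points to 2D and rotates them about an axis. The same formulas with readable names; the unusual (angle % 360) / 450 normalisation is preserved from the sample.

import math


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    # Pinhole-style perspective: points farther along z shrink toward the origin.
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    angle = (angle % 360) / 450 * 180 / math.pi  # normalisation kept from the sample
    if axis == "z":
        return (
            x * math.cos(angle) - y * math.sin(angle),
            y * math.cos(angle) + x * math.sin(angle),
            z,
        )
    if axis == "x":
        return (
            x,
            y * math.cos(angle) - z * math.sin(angle),
            z * math.cos(angle) + y * math.sin(angle),
        )
    if axis == "y":
        return (
            x * math.cos(angle) - z * math.sin(angle),
            y,
            z * math.cos(angle) + x * math.sin(angle),
        )
    raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")


print(convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0))
print(rotate(1.0, 2.0, 3.0, "y", 90.0))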
"""simple docstring""" import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__UpperCAmelCase , __UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase , _lowercase : Optional[Any] = emb.weight.shape _lowercase : Optional[int] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) _lowercase : str = emb.weight.data return lin_layer def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : List[Any] = torch.load(__UpperCAmelCase , map_location="""cpu""" ) _lowercase : Optional[Any] = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] _lowercase : Tuple = mam_aaa["""model"""] remove_ignore_keys_(__UpperCAmelCase ) _lowercase : Tuple = state_dict["""encoder.embed_tokens.weight"""].shape[0] _lowercase : List[str] = MaMaaaConfig( vocab_size=__UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) _lowercase : Union[str, Any] = state_dict["""decoder.embed_tokens.weight"""] _lowercase : List[Any] = MaMaaaForConditionalGeneration(__UpperCAmelCase ) model.model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) _lowercase : List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCAmelCase: Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCAmelCase: Optional[Any] = parser.parse_args() UpperCAmelCase: Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
336
1
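
The code column of the row above converts a fairseq sequence-to-sequence checkpoint and rebuilds the LM head from the shared token embedding via make_linear_from_emb. A standalone sketch of that weight-tying trick:

from torch import nn


def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share storage: weights stay tied
    return lin_layer


emb = nn.Embedding(1000, 64)
lm_head = make_linear_from_emb(emb)
# Both modules now point at the same parameter storage.
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()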
'''simple docstring''' import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig snake_case_ : List[str] = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } snake_case_ : Tuple = logging.get_logger(__name__) class lowercase__ ( lowercase ): lowercase__ = """maskformer""" lowercase__ = {"""hidden_size""": """mask_feature_size"""} lowercase__ = ["""resnet""", """swin"""] lowercase__ = ["""detr"""] def __init__( self : Union[str, Any] ,lowerCamelCase__ : int = 256 ,lowerCamelCase__ : int = 256 ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[Dict] = None ,lowerCamelCase__ : Optional[Dict] = None ,lowerCamelCase__ : float = 0.0_2 ,lowerCamelCase__ : float = 1.0 ,lowerCamelCase__ : float = 1.0 ,lowerCamelCase__ : float = 1.0 ,lowerCamelCase__ : float = 2_0.0 ,lowerCamelCase__ : Optional[bool] = None ,**lowerCamelCase__ : Any ,): '''simple docstring''' if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k _UpperCamelCase : Optional[Any] = SwinConfig( image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Tuple = backbone_config.pop('model_type' ) _UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] _UpperCamelCase : Optional[int] = config_class.from_dict(lowerCamelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
' F'Supported model types: {",".join(self.backbones_supported )}' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 _UpperCamelCase : List[Any] = DetrConfig() else: # verify that the decoder is supported _UpperCamelCase : Tuple = ( decoder_config.pop('model_type' ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F'Transformer Decoder {decoder_type} not supported, please use one of' F' {",".join(self.decoders_supported )}' ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Optional[Any] = CONFIG_MAPPING[decoder_type] _UpperCamelCase : Optional[Any] = config_class.from_dict(lowerCamelCase__ ) _UpperCamelCase : Dict = backbone_config _UpperCamelCase : Optional[Any] = decoder_config # main feature dimension for the model _UpperCamelCase : Dict = fpn_feature_size _UpperCamelCase : str = mask_feature_size # initializer _UpperCamelCase : Any = init_std _UpperCamelCase : Any = init_xavier_std # Hungarian matcher && loss _UpperCamelCase : Optional[int] = cross_entropy_weight _UpperCamelCase : Union[str, Any] = dice_weight _UpperCamelCase : Tuple = mask_weight _UpperCamelCase : Optional[int] = use_auxiliary_loss _UpperCamelCase : List[str] = no_object_weight _UpperCamelCase : Optional[int] = output_auxiliary_logits _UpperCamelCase : List[str] = self.decoder_config.encoder_attention_heads _UpperCamelCase : Optional[Any] = self.decoder_config.num_hidden_layers super().__init__(**lowerCamelCase__ ) @classmethod def UpperCamelCase_ ( cls : Union[str, Any] ,lowerCamelCase__ : PretrainedConfig ,lowerCamelCase__ : PretrainedConfig ,**lowerCamelCase__ : Union[str, Any] ): '''simple docstring''' return cls( backbone_config=lowerCamelCase__ ,decoder_config=lowerCamelCase__ ,**lowerCamelCase__ ,) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' _UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _UpperCamelCase : str = self.backbone_config.to_dict() _UpperCamelCase : Any = self.decoder_config.to_dict() _UpperCamelCase : Any = self.__class__.model_type return output
83
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): lowercase__ = IFInpaintingSuperResolutionPipeline lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) lowercase__ = PipelineTesterMixin.required_optional_params - {"latents"} def _UpperCAmelCase ( self : Any): """simple docstring""" return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=0): """simple docstring""" if str(lowerCAmelCase_).startswith("""mps"""): lowercase_ = torch.manual_seed(lowerCAmelCase_) else: lowercase_ = torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_) lowercase_ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_) lowercase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_) lowercase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_) lowercase_ = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _UpperCAmelCase ( self : int): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) def _UpperCAmelCase ( self : str): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _UpperCAmelCase ( self : str): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _UpperCAmelCase ( self : Dict): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _UpperCAmelCase ( self : str): """simple docstring""" self._test_save_load_local() def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
362
"""simple docstring""" from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , lowerCAmelCase_ : int = 6): """simple docstring""" lowercase_ = None lowercase_ = None self.create_linked_list(lowerCAmelCase_) def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : int): """simple docstring""" lowercase_ = Node() lowercase_ = current_node lowercase_ = current_node lowercase_ = current_node for _ in range(1 , lowerCAmelCase_): lowercase_ = Node() lowercase_ = current_node lowercase_ = previous_node lowercase_ = current_node lowercase_ = self.front lowercase_ = previous_node def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" return ( self.front == self.rear and self.front is not None and self.front.data is None ) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" self.check_can_perform_operation() return self.front.data if self.front else None def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Any): """simple docstring""" if self.rear is None: return self.check_is_full() if not self.is_empty(): lowercase_ = self.rear.next if self.rear: lowercase_ = data def _UpperCAmelCase ( self : str): """simple docstring""" self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: lowercase_ = self.front.data lowercase_ = None return data lowercase_ = self.front lowercase_ = old_front.next lowercase_ = old_front.data lowercase_ = None return data def _UpperCAmelCase ( self : Any): """simple docstring""" if self.is_empty(): raise Exception("""Empty Queue""") def _UpperCAmelCase ( self : Tuple): """simple docstring""" if self.rear and self.rear.next == self.front: raise Exception("""Full Queue""") class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str]): """simple docstring""" lowercase_ = None lowercase_ = None lowercase_ = None if __name__ == "__main__": import doctest doctest.testmod()
313
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor lowerCAmelCase : List[str] =logging.get_logger(__name__) class a_ ( _lowerCAmelCase ): def __init__( self : Tuple , *lowercase : Optional[int] , **lowercase : str ): """simple docstring""" warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
223
"""simple docstring""" from sklearn.metrics import matthews_corrcoef import datasets _a = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ _a = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric(\"matthews_correlation\") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ _a = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase( datasets.Metric ): def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32'''), '''references''': datasets.Value('''int32'''), }) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def UpperCAmelCase ( self , __a , __a , __a=None) -> Dict: '''simple docstring''' return { "matthews_correlation": float(matthews_corrcoef(__a , __a , sample_weight=__a)), }
194
0
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowercase__ : Any = logging.get_logger(__name__) class a__ ( SCREAMING_SNAKE_CASE__ ): a : Optional[int] = ["""pixel_values"""] def __init__( self , A = True , A = 1 / 255 , A = True , A = 8 , **A , ) -> None: '''simple docstring''' super().__init__(**A ) a = do_rescale a = rescale_factor a = do_pad a = pad_size def lowerCAmelCase_ ( self , A , A , A = None , **A ) -> np.ndarray: '''simple docstring''' return rescale(A , scale=A , data_format=A , **A ) def lowerCAmelCase_ ( self , A , A , A = None ) -> List[Any]: '''simple docstring''' a = get_image_size(A ) a = (old_height // size + 1) * size - old_height a = (old_width // size + 1) * size - old_width return pad(A , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=A ) def lowerCAmelCase_ ( self , A , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> List[Any]: '''simple docstring''' a = do_rescale if do_rescale is not None else self.do_rescale a = rescale_factor if rescale_factor is not None else self.rescale_factor a = do_pad if do_pad is not None else self.do_pad a = pad_size if pad_size is not None else self.pad_size a = make_list_of_images(A ) if not valid_images(A ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. a = [to_numpy_array(A ) for image in images] if do_rescale: a = [self.rescale(image=A , scale=A ) for image in images] if do_pad: a = [self.pad(A , size=A ) for image in images] a = [to_channel_dimension_format(A , A ) for image in images] a = {'pixel_values': images} return BatchFeature(data=A , tensor_type=A )
359
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowercase__ : Optional[int] = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize lowercase__ : int = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" lowercase__ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" lowercase__ : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def lowerCAmelCase_ ( self ) -> List[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[ "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score", "https://en.wikipedia.org/wiki/METEOR", ] , ) def lowerCAmelCase_ ( self , A ) -> List[str]: '''simple docstring''' import nltk nltk.download("wordnet" ) if NLTK_VERSION >= version.Version("3.6.5" ): nltk.download("punkt" ) if NLTK_VERSION >= version.Version("3.6.6" ): nltk.download("omw-1.4" ) def lowerCAmelCase_ ( self , A , A , A=0.9 , A=3 , A=0.5 ) -> Tuple: '''simple docstring''' if NLTK_VERSION >= version.Version("3.6.5" ): a = [ meteor_score.single_meteor_score( word_tokenize(A ) , word_tokenize(A ) , alpha=A , beta=A , gamma=A ) for ref, pred in zip(A , A ) ] else: a = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A ) for ref, pred in zip(A , A ) ] return {"meteor": np.mean(A )}
180
0
"""simple docstring""" import re from filelock import FileLock try: import nltk __lowercase = True except (ImportError, ModuleNotFoundError): __lowercase = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def lowercase ( A_ )-> str: '''simple docstring''' re.sub("<n>" , "" , A_ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(A_ ) )
40
"""simple docstring""" from __future__ import annotations def lowercase__( __SCREAMING_SNAKE_CASE : list ): if not nums: raise ValueError('List is empty' ) return sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
213
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", # See all Marian models at https://huggingface.co/models?filter=marian } class __UpperCAmelCase ( _UpperCamelCase ): UpperCamelCase = 'marian' UpperCamelCase = ['past_key_values'] UpperCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : List[str], __A : Union[str, Any]=5_8_1_0_1, __A : Tuple=None, __A : int=1_0_2_4, __A : Tuple=1_2, __A : Any=4_0_9_6, __A : int=1_6, __A : Any=1_2, __A : Optional[Any]=4_0_9_6, __A : List[str]=1_6, __A : List[str]=0.0, __A : Any=0.0, __A : Dict=True, __A : Union[str, Any]=True, __A : str="gelu", __A : Any=1_0_2_4, __A : Any=0.1, __A : List[str]=0.0, __A : List[Any]=0.0, __A : Tuple=0.0_2, __A : Optional[int]=5_8_1_0_0, __A : Dict=False, __A : str=5_8_1_0_0, __A : Tuple=0, __A : Union[str, Any]=0, __A : List[Any]=True, **__A : Any, ): UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : List[str] = decoder_vocab_size or vocab_size UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Union[str, Any] = d_model UpperCAmelCase : Any = encoder_ffn_dim UpperCAmelCase : Tuple = encoder_layers UpperCAmelCase : List[Any] = encoder_attention_heads UpperCAmelCase : Dict = decoder_ffn_dim UpperCAmelCase : Dict = decoder_layers UpperCAmelCase : Optional[int] = decoder_attention_heads UpperCAmelCase : Optional[int] = dropout UpperCAmelCase : List[str] = attention_dropout UpperCAmelCase : int = activation_dropout UpperCAmelCase : Optional[int] = activation_function UpperCAmelCase : Tuple = init_std UpperCAmelCase : List[Any] = encoder_layerdrop UpperCAmelCase : List[str] = decoder_layerdrop UpperCAmelCase : Dict = use_cache UpperCAmelCase : Dict = encoder_layers UpperCAmelCase : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase : Optional[Any] = share_encoder_decoder_embeddings super().__init__( pad_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, is_encoder_decoder=_UpperCAmelCase, decoder_start_token_id=_UpperCAmelCase, forced_eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, ) class __UpperCAmelCase ( _UpperCamelCase ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def __magic_name__ ( self : List[str] ): if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase : List[Any] = {0: 'batch'} UpperCAmelCase : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: UpperCAmelCase : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'} UpperCAmelCase : Tuple = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(_UpperCAmelCase, direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCAmelCase : Dict = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase : Tuple = self.num_layers for i in range(_UpperCAmelCase ): UpperCAmelCase : int = {0: 'batch', 2: 'past_sequence + sequence'} UpperCAmelCase : int = {0: 'batch', 2: 'past_sequence + sequence'} else: UpperCAmelCase : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def __magic_name__ ( self : Union[str, Any] ): if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase : int = super().outputs else: UpperCAmelCase : int = super(_UpperCAmelCase, self ).outputs if self.use_past: UpperCAmelCase : int = self.num_layers for i in range(_UpperCAmelCase ): UpperCAmelCase : Optional[Any] = {0: 'batch', 2: 'past_sequence + sequence'} UpperCAmelCase : Optional[int] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def __magic_name__ ( self : Any, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : int = self._generate_dummy_inputs_for_encoder_and_decoder( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) # Generate decoder inputs UpperCAmelCase : List[str] = seq_length if not self.use_past else 1 UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) UpperCAmelCase : Dict = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} UpperCAmelCase : int = dict(**_UpperCAmelCase, **_UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase : str = common_inputs['input_ids'].shape UpperCAmelCase : Tuple = common_inputs['decoder_input_ids'].shape[1] UpperCAmelCase : Optional[int] = self.num_attention_heads UpperCAmelCase : Tuple = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase : Optional[Any] = decoder_seq_length + 3 UpperCAmelCase : Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCAmelCase : List[Any] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_UpperCAmelCase, _UpperCAmelCase )], dim=1 ) UpperCAmelCase : Dict = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCAmelCase : Optional[Any] = self.num_layers UpperCAmelCase : List[Any] = min(_UpperCAmelCase, _UpperCAmelCase ) UpperCAmelCase : Optional[int] = max(_UpperCAmelCase, _UpperCAmelCase ) - min_num_layers UpperCAmelCase : int = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(_UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase ), ) ) # TODO: test this. 
UpperCAmelCase : Optional[int] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(_UpperCAmelCase, _UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) ) return common_inputs def __magic_name__ ( self : Union[str, Any], __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : int = self._generate_dummy_inputs_for_encoder_and_decoder( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase : int = common_inputs['input_ids'].shape # Not using the same length for past_key_values UpperCAmelCase : Optional[int] = seqlen + 2 UpperCAmelCase : List[str] = self.num_layers UpperCAmelCase : Tuple = self.num_attention_heads UpperCAmelCase : int = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase : List[str] = common_inputs['attention_mask'].dtype UpperCAmelCase : Tuple = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase, _UpperCAmelCase, dtype=_UpperCAmelCase )], dim=1 ) UpperCAmelCase : int = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(_UpperCAmelCase ) ] return common_inputs def __magic_name__ ( self : int, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase : List[str] = compute_effective_axis_dimension( _UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase : List[Any] = tokenizer.num_special_tokens_to_add(_UpperCAmelCase ) UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension( _UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=_UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase : Tuple = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCAmelCase : Any = dict(tokenizer(_UpperCAmelCase, return_tensors=_UpperCAmelCase ) ) return common_inputs def __magic_name__ ( self : List[str], __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase : Any = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _UpperCAmelCase, batch_size=_UpperCAmelCase, seq_length=_UpperCAmelCase, is_pair=_UpperCAmelCase, framework=_UpperCAmelCase ) else: UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_causal_lm( _UpperCAmelCase, batch_size=_UpperCAmelCase, seq_length=_UpperCAmelCase, is_pair=_UpperCAmelCase, framework=_UpperCAmelCase ) return common_inputs def __magic_name__ ( self : Tuple, __A : Union[str, Any], __A : Any, __A : int, __A : List[str] ): if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase : List[str] = super()._flatten_past_key_values_(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) else: UpperCAmelCase : Tuple = super(_UpperCAmelCase, self )._flatten_past_key_values_( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) @property def __magic_name__ ( self : Optional[int] ): return 1E-4
368
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = ShapEImgaImgPipeline UpperCamelCase = ["""image"""] UpperCamelCase = ["""image"""] UpperCamelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] UpperCamelCase = False @property def __magic_name__ ( self : Optional[Any] ): return 3_2 @property def __magic_name__ ( self : Optional[int] ): return 3_2 @property def __magic_name__ ( self : Union[str, Any] ): return self.time_input_dim * 4 @property def __magic_name__ ( self : Any ): return 8 @property def __magic_name__ ( self : List[str] ): torch.manual_seed(0 ) UpperCAmelCase : str = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=6_4, projection_dim=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, ) UpperCAmelCase : Any = CLIPVisionModel(__A ) return model @property def __magic_name__ ( self : int ): UpperCAmelCase : Optional[int] = CLIPImageProcessor( crop_size=2_2_4, do_center_crop=__A, do_normalize=__A, do_resize=__A, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=2_2_4, ) return image_processor @property def __magic_name__ ( self : Dict ): torch.manual_seed(0 ) UpperCAmelCase : Any = { '''num_attention_heads''': 2, '''attention_head_dim''': 1_6, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 3_2, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } UpperCAmelCase : List[Any] = PriorTransformer(**__A ) return model @property def __magic_name__ ( self : Union[str, Any] ): torch.manual_seed(0 ) UpperCAmelCase : List[Any] = { '''param_shapes''': ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 1_2, '''background''': ( 0.1, 0.1, 0.1, ), } UpperCAmelCase : List[str] = ShapERenderer(**__A ) return model def __magic_name__ ( self : List[Any] ): UpperCAmelCase : str = self.dummy_prior UpperCAmelCase : List[str] = self.dummy_image_encoder UpperCAmelCase : List[Any] = self.dummy_image_processor UpperCAmelCase : Dict = self.dummy_renderer UpperCAmelCase : int = HeunDiscreteScheduler( beta_schedule='''exp''', num_train_timesteps=1_0_2_4, prediction_type='''sample''', use_karras_sigmas=__A, clip_sample=__A, clip_sample_range=1.0, ) UpperCAmelCase : List[str] = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': 
image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __magic_name__ ( self : List[Any], __A : Dict, __A : List[Any]=0 ): UpperCAmelCase : int = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(__A ) ).to(__A ) if str(__A ).startswith('''mps''' ): UpperCAmelCase : List[str] = torch.manual_seed(__A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A ) UpperCAmelCase : Dict = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 3_2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : str ): UpperCAmelCase : Dict = '''cpu''' UpperCAmelCase : Any = self.get_dummy_components() UpperCAmelCase : Tuple = self.pipeline_class(**__A ) UpperCAmelCase : Dict = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__A ) ) UpperCAmelCase : Any = output.images[0] UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) UpperCAmelCase : Optional[int] = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __magic_name__ ( self : Dict ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = torch_device == '''cpu''' UpperCAmelCase : List[Any] = True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=__A, relax_max_difference=__A, ) def __magic_name__ ( self : Dict ): UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : Union[str, Any] = self.pipeline_class(**__A ) UpperCAmelCase : List[str] = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase : Any = 1 UpperCAmelCase : Any = 2 UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__A ) for key in inputs.keys(): if key in self.batch_params: UpperCAmelCase : Any = batch_size * [inputs[key]] UpperCAmelCase : int = pipe(**__A, num_images_per_prompt=__A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): def __magic_name__ ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) UpperCAmelCase : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) UpperCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) UpperCAmelCase : int = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase : Dict = torch.Generator(device=__A ).manual_seed(0 ) UpperCAmelCase : List[str] = pipe( __A, generator=__A, guidance_scale=3.0, num_inference_steps=6_4, frame_size=6_4, output_type='''np''', ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(__A, __A )
99
0
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def a__ ( UpperCAmelCase : List[str] ) -> str: UpperCAmelCase : Dict = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(UpperCAmelCase , UpperCAmelCase ) def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Optional[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase ) UpperCAmelCase : str = emb.weight.data return lin_layer def a__ ( UpperCAmelCase : Tuple ) -> Dict: UpperCAmelCase : Optional[int] = torch.load(UpperCAmelCase , map_location='''cpu''' ) UpperCAmelCase : int = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] UpperCAmelCase : Dict = mam_aaa['''model'''] remove_ignore_keys_(UpperCAmelCase ) UpperCAmelCase : List[str] = state_dict['''encoder.embed_tokens.weight'''].shape[0] UpperCAmelCase : Optional[int] = MaMaaaConfig( vocab_size=UpperCAmelCase , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , ) UpperCAmelCase : List[str] = state_dict['''decoder.embed_tokens.weight'''] UpperCAmelCase : Dict = MaMaaaForConditionalGeneration(UpperCAmelCase ) model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) UpperCAmelCase : int = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") _lowerCamelCase : Union[str, Any] = parser.parse_args() _lowerCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
336
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """codegen""" UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ): UpperCAmelCase : int = vocab_size UpperCAmelCase : Tuple = n_ctx UpperCAmelCase : Tuple = n_positions UpperCAmelCase : Optional[int] = n_embd UpperCAmelCase : Union[str, Any] = n_layer UpperCAmelCase : List[str] = n_head UpperCAmelCase : Tuple = n_inner UpperCAmelCase : int = rotary_dim UpperCAmelCase : List[Any] = activation_function UpperCAmelCase : List[str] = resid_pdrop UpperCAmelCase : Optional[Any] = embd_pdrop UpperCAmelCase : str = attn_pdrop UpperCAmelCase : Tuple = layer_norm_epsilon UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Union[str, Any] = use_cache UpperCAmelCase : Any = bos_token_id UpperCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ): super().__init__(__A, task=__A, patching_specs=__A, use_past=__A ) if not getattr(self._config, '''pad_token_id''', __A ): # TODO: how to do that better? 
UpperCAmelCase : Union[str, Any] = 0 @property def __magic_name__ ( self : str ): UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__A, direction='''inputs''' ) UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __magic_name__ ( self : Dict ): return self._config.n_layer @property def __magic_name__ ( self : List[str] ): return self._config.n_head def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs( __A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A ) # We need to order the input in the way they appears in the forward() UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase : str = seqlen + 2 UpperCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase : Optional[int] = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase : Dict = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 ) return ordered_inputs @property def __magic_name__ ( self : Tuple ): return 1_3
336
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def UpperCamelCase (lowercase_: Any ) -> int: A__ : Optional[Any] = 384 A__ : Union[str, Any] = 7 if "tiny" in model_name: A__ : List[str] = 96 A__ : Optional[int] = (2, 2, 6, 2) A__ : str = (3, 6, 12, 24) elif "small" in model_name: A__ : List[str] = 96 A__ : Optional[int] = (2, 2, 18, 2) A__ : str = (3, 6, 12, 24) elif "base" in model_name: A__ : Tuple = 128 A__ : Optional[Any] = (2, 2, 18, 2) A__ : str = (4, 8, 16, 32) A__ : Optional[int] = 12 A__ : Union[str, Any] = 512 elif "large" in model_name: A__ : Dict = 192 A__ : str = (2, 2, 18, 2) A__ : List[str] = (6, 12, 24, 48) A__ : str = 12 A__ : Tuple = 768 # set label information A__ : Optional[Any] = 150 A__ : Union[str, Any] = """huggingface/label-files""" A__ : List[str] = """ade20k-id2label.json""" A__ : int = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) ) A__ : Optional[Any] = {int(lowercase_ ): v for k, v in idalabel.items()} A__ : Union[str, Any] = {v: k for k, v in idalabel.items()} A__ : str = SwinConfig( embed_dim=lowercase_ , depths=lowercase_ , num_heads=lowercase_ , window_size=lowercase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) A__ : Union[str, Any] = UperNetConfig( backbone_config=lowercase_ , auxiliary_in_channels=lowercase_ , num_labels=lowercase_ , idalabel=lowercase_ , labelaid=lowercase_ , ) return config def UpperCamelCase (lowercase_: int ) -> Optional[int]: A__ : List[Any] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) 
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def UpperCamelCase (lowercase_: Any , lowercase_: Tuple , lowercase_: Any ) -> int: A__ : Dict = dct.pop(lowercase_ ) A__ : str = val def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: List[str] ) -> Tuple: A__ : Tuple = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A__ : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A__ : Optional[Any] = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) A__ : Optional[int] = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict A__ : List[str] = in_proj_weight[:dim, :] A__ : List[str] = in_proj_bias[: dim] A__ : str = in_proj_weight[ dim : dim * 2, : ] A__ : Optional[int] = in_proj_bias[ dim : dim * 2 ] A__ : Tuple = in_proj_weight[ -dim :, : ] A__ : List[str] = in_proj_bias[-dim :] # fmt: on def UpperCamelCase (lowercase_: Union[str, Any] ) -> Any: A__ , A__ : List[Any] = x.shape A__ : Optional[int] = x.reshape(lowercase_ , 4 , in_channel // 4 ) A__ : Tuple = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowercase_ , lowercase_ ) return x def UpperCamelCase (lowercase_: Union[str, Any] ) -> Optional[Any]: A__ , A__ : Any = x.shape A__ : List[Any] = x.reshape(lowercase_ , in_channel // 4 , 4 ) A__ : Any = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowercase_ , lowercase_ ) return x def UpperCamelCase (lowercase_: Union[str, Any] ) -> List[Any]: A__ : int = x.shape[0] A__ : Dict = x.reshape(4 , in_channel // 4 ) A__ : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowercase_ ) return x def UpperCamelCase (lowercase_: str ) -> Tuple: A__ : int = x.shape[0] A__ : Tuple = x.reshape(in_channel // 4 , 4 ) A__ : str = x[:, 
[0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowercase_ ) return x def UpperCamelCase (lowercase_: List[Any] , lowercase_: Dict , lowercase_: int ) -> List[str]: A__ : Union[str, Any] = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } A__ : Optional[int] = model_name_to_url[model_name] A__ : Tuple = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" , file_name=lowercase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowercase_ , param.shape ) A__ : Optional[int] = get_upernet_config(lowercase_ ) A__ : str = UperNetForSemanticSegmentation(lowercase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A__ : str = state_dict.pop(lowercase_ ) if "bn" in key: A__ : str = key.replace("""bn""" , """batch_norm""" ) A__ : str = val # rename keys A__ : Tuple = create_rename_keys(lowercase_ ) for src, dest in rename_keys: rename_key(lowercase_ , lowercase_ , lowercase_ ) read_in_q_k_v(lowercase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: A__ : List[Any] = reverse_correct_unfold_reduction_order(lowercase_ ) if "norm" in key: A__ : Optional[Any] = reverse_correct_unfold_norm_order(lowercase_ ) model.load_state_dict(lowercase_ ) # verify on image A__ : Any = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" A__ : str = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" ) A__ : int = SegformerImageProcessor() A__ : int = processor(lowercase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): A__ : Union[str, Any] = model(lowercase_ ) A__ : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": A__ : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": A__ : Dict = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": A__ : List[str] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": A__ : List[Any] = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert 
torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase_ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(lowercase_ ) if push_to_hub: print(f"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(f"""openmmlab/{model_name}""" ) processor.push_to_hub(f"""openmmlab/{model_name}""" ) if __name__ == "__main__": A_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[f'''upernet-swin-{size}''' for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) A_ : Any = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
141
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Sort item indices by value-to-weight ratio, best first.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # The whole item fits: take all of it.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the largest fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
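A quick usage sketch for the greedy routine above; the item values, weights and capacity are made-up numbers:

value, weight = [1, 3, 5, 7], [1, 2, 3, 4]
max_value, fractions = fractional_knapsack(value, weight, capacity=5)
# Greedy by value/weight ratio: all of item 3 (ratio 1.75), then 1/3 of item 2.
print(max_value)  # 8.666...
print(fractions)  # [0, 0, 0.333..., 1]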
141
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Tuple =logging.get_logger(__name__) lowerCAmelCase : Optional[Any] ={'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase : str ={ '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, } lowerCAmelCase : List[str] ={ '''moussaKam/mbarthez''': 1_024, '''moussaKam/barthez''': 1_024, '''moussaKam/barthez-orangesum-title''': 1_024, } lowerCAmelCase : List[str] ='''▁''' class a_ ( a__ ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['input_ids', 'attention_mask'] def __init__( self : int , lowercase : Tuple , lowercase : Optional[int]="<s>" , lowercase : Optional[Any]="</s>" , lowercase : Dict="</s>" , lowercase : Any="<s>" , lowercase : int="<unk>" , lowercase : str="<pad>" , lowercase : str="<mask>" , lowercase : Union[str, Any] = None , **lowercase : Any , ): """simple docstring""" lowercase_ :Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token lowercase_ :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) lowercase_ :Optional[Any] = vocab_file lowercase_ :Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) lowercase_ :List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase_ :Union[str, Any] = len(self.sp_model ) - 1 lowercase_ :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowercase__ ( self : Union[str, Any] , lowercase : Any , lowercase : Optional[Any] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase_ :Tuple = [self.cls_token_id] lowercase_ :List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : Any , lowercase : List[str] , lowercase : List[Any] = None , lowercase : Optional[Any] = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def lowercase__ ( self : List[Any] , lowercase : int , lowercase : Dict = None ): """simple docstring""" lowercase_ :List[str] = [self.sep_token_id] lowercase_ :str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : List[Any] ): 
"""simple docstring""" return len(self.sp_model ) def lowercase__ ( self : List[Any] ): """simple docstring""" lowercase_ :Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self : int , lowercase : int ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def lowercase__ ( self : int , lowercase : Union[str, Any] ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase_ :Dict = self.sp_model.PieceToId(_lowerCamelCase ) return spm_id if spm_id else self.unk_token_id def lowercase__ ( self : Dict , lowercase : Dict ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(_lowerCamelCase ) def lowercase__ ( self : Union[str, Any] , lowercase : int ): """simple docstring""" lowercase_ :Optional[Any] = [] lowercase_ :Dict = '''''' lowercase_ :str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCamelCase ) + token lowercase_ :List[Any] = True lowercase_ :Dict = [] else: current_sub_tokens.append(_lowerCamelCase ) lowercase_ :List[Any] = False out_string += self.sp_model.decode(_lowerCamelCase ) return out_string.strip() def __getstate__( self : List[Any] ): """simple docstring""" lowercase_ :Tuple = self.__dict__.copy() lowercase_ :str = None return state def __setstate__( self : Optional[int] , lowercase : Any ): """simple docstring""" lowercase_ :List[str] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase_ :int = {} lowercase_ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self : Any , lowercase : Dict , lowercase : Dict = None ): """simple docstring""" if not os.path.isdir(_lowerCamelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowercase_ :Tuple = os.path.join( _lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , "wb" ) as fi: lowercase_ :Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
223
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : int = logging.get_logger(__name__) a__ : Optional[Any] = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr' __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' ) SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = use_timm_backbone SCREAMING_SNAKE_CASE : Optional[int] = backbone_config SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = num_queries SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[int] = d_model SCREAMING_SNAKE_CASE : str = encoder_ffn_dim SCREAMING_SNAKE_CASE : str = encoder_layers SCREAMING_SNAKE_CASE : str = encoder_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim SCREAMING_SNAKE_CASE : int = decoder_layers SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads SCREAMING_SNAKE_CASE : List[str] = dropout SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE : str = activation_dropout SCREAMING_SNAKE_CASE : Optional[int] = activation_function SCREAMING_SNAKE_CASE : Optional[int] = init_std SCREAMING_SNAKE_CASE : List[str] = init_xavier_std SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type SCREAMING_SNAKE_CASE : str = backbone SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Dict = dilation # deformable attributes SCREAMING_SNAKE_CASE : str = num_feature_levels SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points SCREAMING_SNAKE_CASE : Any = decoder_n_points SCREAMING_SNAKE_CASE : str = two_stage SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals SCREAMING_SNAKE_CASE : Dict = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher SCREAMING_SNAKE_CASE : int = class_cost SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost SCREAMING_SNAKE_CASE : Optional[int] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient SCREAMING_SNAKE_CASE : Tuple = focal_alpha SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def __lowerCAmelCase ( self ) ->int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) ->int: return self.d_model def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
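A short usage sketch for the configuration above, assuming the upstream class name DeformableDetrConfig for the mangled class (note the guard above requires with_box_refine=True whenever two_stage=True):

from transformers import DeformableDetrConfig  # upstream name for the class above

cfg = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=300)
# The attribute_map aliases hidden_size -> d_model and
# num_attention_heads -> encoder_attention_heads:
print(cfg.hidden_size, cfg.d_model)                          # 256 256
print(cfg.num_attention_heads, cfg.encoder_attention_heads)  # 8 8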
313
0
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Any=36 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Dict=512 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : List[str]=6 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Tuple=1_000 , ) ->str: '''simple docstring''' A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = patch_size A__ = text_seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = coordinate_size A__ = shape_size A__ = num_labels A__ = num_choices A__ = scope A__ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) A__ = text_seq_length A__ = (image_size // patch_size) ** 2 + 1 A__ = self.text_seq_length + self.image_seq_length def SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size) A__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: A__ = bbox[i, j, 3] A__ = bbox[i, j, 1] A__ = t if bbox[i, j, 2] < bbox[i, j, 0]: A__ = bbox[i, j, 2] A__ = bbox[i, j, 0] A__ = t A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.text_seq_length]) A__ = None if 
self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size) A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels) A__ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int) ->Tuple: '''simple docstring''' A__ = LayoutLMvaModel(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() # text + image A__ = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__) A__ = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # text only A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size)) # image only A__ = model(pixel_values=UpperCAmelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]) ->List[Any]: '''simple docstring''' A__ = self.num_labels A__ = LayoutLMvaForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any) ->Any: '''simple docstring''' A__ = self.num_labels A__ = LayoutLMvaForTokenClassification(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , 
token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str]) ->Optional[int]: '''simple docstring''' A__ = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : Any) ->Any: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCAmelCase__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]) ->Tuple: '''simple docstring''' return True def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = LayoutLMvaModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int=False) ->Tuple: '''simple docstring''' A__ = copy.deepcopy(UpperCAmelCase__) if model_class in get_values(UpperCAmelCase__): A__ = { k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous() if isinstance(UpperCAmelCase__ , torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCAmelCase__): A__ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__) elif model_class in get_values(UpperCAmelCase__): A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__) A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__) elif model_class in [ *get_values(UpperCAmelCase__), ]: A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__) elif model_class in [ *get_values(UpperCAmelCase__), ]: A__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , 
dtype=torch.long , device=UpperCAmelCase__ , ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A__ = type self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]: '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = LayoutLMvaModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: '''simple docstring''' A__ = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''').to(UpperCAmelCase__) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''pt''').pixel_values.to(UpperCAmelCase__) A__ = torch.tensor([[1, 2]]) A__ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) # forward pass A__ = model( input_ids=input_ids.to(UpperCAmelCase__) , bbox=bbox.to(UpperCAmelCase__) , pixel_values=pixel_values.to(UpperCAmelCase__) , ) # verify the logits A__ = torch.Size((1, 199, 768)) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__) A__ = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(UpperCAmelCase__) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1e-4))
231
class CircularQueue:
    """Fixed-capacity circular queue backed by a list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # Mirrors the original behaviour: returns False instead of raising on empty.
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
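Usage sketch for the queue above (method names as renamed here):

q = CircularQueue(3)
q.enqueue("a").enqueue("b")  # enqueue returns self, so calls chain
print(len(q), q.first())     # 2 a
print(q.dequeue())           # a
print(q.dequeue())           # b
print(q.is_empty())          # True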
231
1
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger __snake_case :Union[str, Any] = get_logger(__name__) class _A : def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' __a = ( os.path.join(__SCREAMING_SNAKE_CASE , config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __a = Extractor def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __a = os.path.abspath(__SCREAMING_SNAKE_CASE) return os.path.join(self.extract_dir , hash_url_to_filename(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool): '''simple docstring''' return force_extract or ( not os.path.isfile(__SCREAMING_SNAKE_CASE) and not (os.path.isdir(__SCREAMING_SNAKE_CASE) and os.listdir(__SCREAMING_SNAKE_CASE)) ) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool = False): '''simple docstring''' __a = self.extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE) if not extractor_format: return input_path __a = self._get_output_path(__SCREAMING_SNAKE_CASE) if self._do_extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): self.extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return output_path class _A ( __UpperCAmelCase ): @classmethod @abstractmethod def _lowerCamelCase ( cls : Dict , __SCREAMING_SNAKE_CASE : Union[Path, str] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' ... @staticmethod @abstractmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' ... 
class _A ( __UpperCAmelCase ,__UpperCAmelCase ): UpperCamelCase__ : List[bytes] = [] @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' with open(__SCREAMING_SNAKE_CASE , '''rb''') as f: return f.read(__SCREAMING_SNAKE_CASE) @classmethod def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : bytes = b""): '''simple docstring''' if not magic_number: __a = max(len(__SCREAMING_SNAKE_CASE) for cls_magic_number in cls.magic_numbers) try: __a = cls.read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) except OSError: return False return any(magic_number.startswith(__SCREAMING_SNAKE_CASE) for cls_magic_number in cls.magic_numbers) class _A ( __UpperCAmelCase ): @classmethod def _lowerCamelCase ( cls : Optional[int] , __SCREAMING_SNAKE_CASE : Union[Path, str] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' return tarfile.is_tarfile(__SCREAMING_SNAKE_CASE) @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' def resolved(__SCREAMING_SNAKE_CASE : str) -> str: return os.path.realpath(os.path.abspath(__SCREAMING_SNAKE_CASE)) def badpath(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)).startswith(__SCREAMING_SNAKE_CASE) def badlink(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str) -> bool: # Links are interpreted relative to the directory containing the link __a = resolved(os.path.join(__SCREAMING_SNAKE_CASE , os.path.dirname(info.name))) return badpath(info.linkname , base=__SCREAMING_SNAKE_CASE) __a = resolved(__SCREAMING_SNAKE_CASE) for finfo in members: if badpath(finfo.name , __SCREAMING_SNAKE_CASE): logger.error(F'Extraction of {finfo.name} is blocked (illegal path)') elif finfo.issym() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}') elif finfo.islnk() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}') else: yield finfo @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE) __a = tarfile.open(__SCREAMING_SNAKE_CASE) tar_file.extractall(__SCREAMING_SNAKE_CASE , members=TarExtractor.safemembers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) tar_file.close() class _A ( __UpperCAmelCase ): UpperCamelCase__ : Dict = [B'''\x1F\x8B'''] @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' with gzip.open(__SCREAMING_SNAKE_CASE , '''rb''') as gzip_file: with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : Dict = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : bytes = b""): '''simple docstring''' if super().is_extractable(__SCREAMING_SNAKE_CASE , 
magic_number=__SCREAMING_SNAKE_CASE): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(__SCREAMING_SNAKE_CASE , '''rb''') as fp: __a = _EndRecData(__SCREAMING_SNAKE_CASE) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __a = fp.read(__SCREAMING_SNAKE_CASE) # CD is where we expect it to be if len(__SCREAMING_SNAKE_CASE) == sizeCentralDir: __a = struct.unpack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE) with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''r''') as zip_file: zip_file.extractall(__SCREAMING_SNAKE_CASE) zip_file.close() class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' with lzma.open(__SCREAMING_SNAKE_CASE) as compressed_file: with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : int = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' if not config.RARFILE_AVAILABLE: raise ImportError('''Please pip install rarfile''') import rarfile os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE) __a = rarfile.RarFile(__SCREAMING_SNAKE_CASE) rf.extractall(__SCREAMING_SNAKE_CASE) rf.close() class _A ( __UpperCAmelCase ): UpperCamelCase__ : List[Any] = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' if not config.ZSTANDARD_AVAILABLE: raise ImportError('''Please pip install zstandard''') import zstandard as zstd __a = zstd.ZstdDecompressor() with open(__SCREAMING_SNAKE_CASE , '''rb''') as ifh, open(__SCREAMING_SNAKE_CASE , '''wb''') as ofh: dctx.copy_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : str = [B'''\x42\x5A\x68'''] @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' with bza.open(__SCREAMING_SNAKE_CASE , '''rb''') as compressed_file: with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file: 
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : Union[str, Any] = [B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' if not config.PY7ZR_AVAILABLE: raise ImportError('''Please pip install py7zr''') import pyazr os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE) with pyazr.SevenZipFile(__SCREAMING_SNAKE_CASE , '''r''') as archive: archive.extractall(__SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : int = [B'''\x04\x22\x4D\x18'''] @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]): '''simple docstring''' if not config.LZ4_AVAILABLE: raise ImportError('''Please pip install lz4''') import lza.frame with lza.frame.open(__SCREAMING_SNAKE_CASE , '''rb''') as compressed_file: with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) class _A : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) UpperCamelCase__ : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def _lowerCamelCase ( cls : int): '''simple docstring''' return max( len(__SCREAMING_SNAKE_CASE) for extractor in cls.extractors.values() if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for extractor_magic_number in extractor.magic_numbers) @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' try: return MagicNumberBaseExtractor.read_magic_number(__SCREAMING_SNAKE_CASE , magic_number_length=__SCREAMING_SNAKE_CASE) except OSError: return b"" @classmethod def _lowerCamelCase ( cls : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : bool = False): '''simple docstring''' warnings.warn( '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
''' '''Use \'infer_extractor_format\' instead.''' , category=__SCREAMING_SNAKE_CASE , ) __a = cls.infer_extractor_format(__SCREAMING_SNAKE_CASE) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Union[Path, str]): # <Added version="2.4.0"/> '''simple docstring''' __a = cls._get_magic_number_max_length() __a = cls._read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE): return extractor_format @classmethod def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[BaseExtractor] = "deprecated" , ): '''simple docstring''' os.makedirs(os.path.dirname(__SCREAMING_SNAKE_CASE) , exist_ok=__SCREAMING_SNAKE_CASE) # Prevent parallel extractions __a = str(Path(__SCREAMING_SNAKE_CASE).with_suffix('''.lock''')) with FileLock(__SCREAMING_SNAKE_CASE): shutil.rmtree(__SCREAMING_SNAKE_CASE , ignore_errors=__SCREAMING_SNAKE_CASE) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): # passed as positional arg warnings.warn( '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'extractor_format\' instead.''' , category=__SCREAMING_SNAKE_CASE , ) __a = extractor if extractor != '''deprecated''' else extractor_format else: __a = cls.extractors[extractor_format] return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else: warnings.warn( '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ''' '''exception in 3.0.0.''' , category=__SCREAMING_SNAKE_CASE , ) for extractor in cls.extractors.values(): if extractor.is_extractable(__SCREAMING_SNAKE_CASE): return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
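The extractor registry above dispatches on leading magic numbers; below is a small self-contained sketch of that detection idea (the byte signatures are copied from the classes above, while the helper name and dictionary are made up for illustration):

# Hypothetical helper illustrating magic-number sniffing in the style of the
# extractors above; only a subset of the registered formats is shown.
MAGIC = {
    b"\x1f\x8b": "gzip",
    b"PK\x03\x04": "zip",
    b"\xfd7zXZ\x00": "xz",
    b"\x42\x5a\x68": "bz2",
    b"\x28\xb5\x2f\xfd": "zstd",
}


def sniff_format(path):
    """Return the archive format whose magic number prefixes the file, else None."""
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in MAGIC))
    for magic, fmt in MAGIC.items():
        if head.startswith(magic):
            return fmt
    return None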
49
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class a ( __lowerCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase :Dict = WavaVecaPhonemeCTCTokenizer lowerCamelCase :Optional[int] = False def UpperCAmelCase ( self ) -> Optional[int]: super().setUp() _A = ( """<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """ """ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """ """ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """ """oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """ """pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """ """yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """ """əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """ """ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """ """ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """ """uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """ """ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """ """ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """ """ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4""" ).split(""" """ ) _A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _A = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=20 , lowerCAmelCase_=5 ) -> Tuple[str, list]: _A = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase_ )) for i in range(len(lowerCAmelCase_ ) )] _A = list(filter(lambda lowerCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCAmelCase_ ) , lowerCAmelCase_ ) ) if max_length is not None and len(lowerCAmelCase_ ) > max_length: _A = toks[:max_length] if min_length is not None and len(lowerCAmelCase_ ) < min_length and len(lowerCAmelCase_ ) > 0: while len(lowerCAmelCase_ ) < min_length: _A = toks + toks # toks_str = [t[1] for t in toks] _A = [t[0] for t in toks] # Ensure consistency _A = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) if " " not in output_txt and len(lowerCAmelCase_ ) > 1: _A = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase_ ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase_ ) ) if with_prefix_space: _A = """ """ + output_txt _A = tokenizer.encode(lowerCAmelCase_ , 
add_special_tokens=lowerCAmelCase_ ) return output_txt, output_ids def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any: kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[str]: _A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) # check adding a single token tokenizer.add_tokens("""xxx""" ) _A = tokenizer("""m xxx ɪ""" , do_phonemize=lowerCAmelCase_ ).input_ids self.assertEqual(lowerCAmelCase_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] ) _A = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowerCAmelCase_ ).input_ids self.assertEqual(lowerCAmelCase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa _A = tokenizer("""maɪ c""" , do_phonemize=lowerCAmelCase_ ).input_ids self.assertEqual(lowerCAmelCase_ , [3, 2_00] ) # mai should be <unk> (=3) def UpperCAmelCase ( self ) -> int: _A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) _A = """Hello how are you""" _A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" ) self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" ) def UpperCAmelCase ( self ) -> List[str]: _A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) _A = """Hello how are you""" _A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" ) self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids ) def UpperCAmelCase ( self ) -> List[str]: _A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) _A = """Hello how are you""" _A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" ) _A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Tuple: _A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) _A = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] _A = tokenizer.decode(sample_ids[0] ) _A = tokenizer.batch_decode(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , batch_tokens[0] ) self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] ) def UpperCAmelCase ( self ) -> str: _A = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) _A = """Hello how are you""" _A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" ) self.assertEqual(lowerCAmelCase_ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" ) def UpperCAmelCase ( self ) -> Tuple: _A = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) _A = """Hello how are you""" _A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" ) self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids ) def UpperCAmelCase ( self ) -> List[Any]: _A = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) # fmt: off _A = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 
22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter _A = tokenizer.decode(sample_ids[0] ) _A = tokenizer.batch_decode(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , batch_tokens[0] ) self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] ) # decode with no word_del_token filter _A = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCAmelCase_ ) _A = tokenizer.batch_decode(lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , batch_tokens[0] ) self.assertEqual(lowerCAmelCase_ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] ) def UpperCAmelCase ( self ) -> Dict: _A = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) _A = """Hello how are you""" _A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" ) _A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Tuple: _A = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) _A = """Hello how are you""" _A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" ) _A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ ) self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Tuple: _A = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowerCAmelCase_ ) _A = """Hello how are you""" _A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""en-us""" ).input_ids _A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""fr-fr""" ).input_ids self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" ) self.assertEqual(lowerCAmelCase_ , """ɛ l o h aʊ a ʁ j u""" ) def UpperCAmelCase ( self ) -> Any: _A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) _A = """Hello how Are you""" _A = """hello how are you""" _A = tokenizer(lowerCAmelCase_ ).input_ids _A = tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[str]: _A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) tokenizer.add_tokens(["""!""", """?"""] ) tokenizer.add_special_tokens({"""cls_token""": """$$$"""} ) # fmt: off _A = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on _A = tokenizer.batch_decode(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ!?!? 
$$$""", """j ð s j ð s oːɹ $$$"""] ) @staticmethod def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _A = [d[key] for d in offsets] return retrieved_list def UpperCAmelCase ( self ) -> Tuple: _A = self.get_tokenizer(word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" _A = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on _A = tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""char_offsets""" in outputs ) self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.get_tokenizer(word_delimiter_token="""|""" ) def check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertTrue(isinstance(outputs_list[0] , lowerCAmelCase_ ) ) # transform list to ModelOutput _A = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] ) def recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): [recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for la, la in zip(lowerCAmelCase_ , lowerCAmelCase_ )] self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] ) # fmt: off _A = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char _A = tokenizer.batch_decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ ) _A = [tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ ) for ids in sample_ids] check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ ) @unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" ) def UpperCAmelCase ( self ) -> int: pass @unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" ) def UpperCAmelCase ( self ) -> List[str]: pass @unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" ) def UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" ) def UpperCAmelCase ( self ) -> Optional[int]: pass def UpperCAmelCase ( self ) -> List[Any]: _A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = tokenizer.vocab_size _A = len(lowerCAmelCase_ ) self.assertNotEqual(lowerCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _A = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] _A = tokenizer.add_tokens(lowerCAmelCase_ ) _A = tokenizer.vocab_size _A = len(lowerCAmelCase_ ) self.assertNotEqual(lowerCAmelCase_ , 0 ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) self.assertEqual(lowerCAmelCase_ , all_size + len(lowerCAmelCase_ ) ) _A = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowerCAmelCase_ ) self.assertGreaterEqual(len(lowerCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) _A = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} _A = tokenizer.add_special_tokens(lowerCAmelCase_ ) _A = tokenizer.vocab_size _A = len(lowerCAmelCase_ ) self.assertNotEqual(lowerCAmelCase_ , 0 ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) self.assertEqual(lowerCAmelCase_ , all_size_a + len(lowerCAmelCase_ ) ) _A = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowerCAmelCase_ ) self.assertGreaterEqual(len(lowerCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" ) def UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" ) def UpperCAmelCase ( self ) -> Union[str, Any]: pass def UpperCAmelCase ( self ) -> str: # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. 
_A = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""] _A = tokenizer.convert_tokens_to_string(lowerCAmelCase_ ) self.assertIsInstance(output["""text"""] , lowerCAmelCase_ )
180
0
from __future__ import annotations


def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : str ):
    """simple docstring"""
    A_ , A_ = set(__UpperCamelCase ), [start]
    while stack:
        A_ = stack.pop()
        explored.add(__UpperCamelCase )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(__UpperCamelCase )
    return explored


__a :int = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
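A minimal, readable sketch of the same iterative traversal (the function and variable names below are illustrative, not taken from the row above):

def depth_first_search(graph: dict, start: str) -> set:
    # iterative DFS: a stack where BFS would use a queue
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()  # pop the last element instead of the first one
        explored.add(v)
        # push adjacent vertices without exploring them yet
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored

# On the sample graph above, depth_first_search(G, 'A') visits all of 'A'..'G'.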
329
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing """simple docstring""" return x.sum() def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class _a : """simple docstring""" _lowerCamelCase : int _lowerCamelCase : str class _a ( snake_case_ ): """simple docstring""" def __A ( self : Dict ): A_ = {} A_ = [] A_ = 1 A_ = [1, 2] A_ = {"a": 1, "b": 2} A_ = {"a": [1, 2], "b": [3, 4]} A_ = {"a": {"1": 1}, "b": 2} A_ = {"a": 1, "b": 2, "c": 3, "d": 4} A_ = {} A_ = [] A_ = 2 A_ = [2, 3] A_ = {"a": 2, "b": 3} A_ = {"a": [2, 3], "b": [4, 5]} A_ = {"a": {"1": 2}, "b": 3} A_ = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) A_ = 2 self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} A_ = {"a": 2, "b": 0, "c": 2} A_ = { "a": np.eye(2 ).astype(UpperCAmelCase ), "b": np.zeros(3 ).astype(UpperCAmelCase ), "c": np.ones(2 ).astype(UpperCAmelCase ), } self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(UpperCAmelCase ): # can't pickle 
a local lambda map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase ) def __A ( self : List[str] ): A_ = {"a": 1, "b": 2} A_ = {"a": 3, "b": 4} A_ = {"a": 5, "b": 6} A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase ) def __A ( self : Any ): class _a : """simple docstring""" _lowerCamelCase : int = 'bar' A_ = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" ,[ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] ,) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ): """simple docstring""" with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: A_ = {f'''{i}''': i for i in range(__UpperCamelCase )} A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class _a ( snake_case_ ): """simple docstring""" @require_tf def __A ( self : Union[str, Any] ): import tensorflow as tf from tensorflow.keras import layers A_ = layers.Dense(2 ) def gen_random_output(): A_ = tf.random.uniform((1, 3) ) return model(UpperCAmelCase ).numpy() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __A ( self : Optional[int] ): import torch def gen_random_output(): A_ = torch.nn.Linear(3 , 2 ) A_ = torch.rand(1 , 3 ) return model(UpperCAmelCase ).detach().numpy() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __A ( self : Any ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): A_ = gen_random_output() with temp_seed(42 ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" ,[{}] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" ,[ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], 
[4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] ,) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).flatten() assert output == expected_output def __snake_case ( ): """simple docstring""" A_ = A(x=1 ,y="foobar" ) A_ = {"x": 1, "y": "foobar"} assert asdict(__UpperCamelCase ) == expected_output A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]} A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(__UpperCamelCase ) == expected_output with pytest.raises(__UpperCamelCase ): asdict([1, A(x=10 ,y="foo" )] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" return text.split() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __snake_case ( ): """simple docstring""" with Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: A_ = [] for yield_time, content in iflatmap_unordered( __UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__UpperCamelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(__UpperCamelCase ) == 4
329
1
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCamelCase ( a__ ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ) -> int: super().__init__() self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) # create a imagenet -> id dictionary for easier use a : str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split("," ): a : int = int(lowerCAmelCase__ ) a : Any = dict(sorted(self.labels.items() ) ) def __a ( self , lowerCAmelCase__ ) -> List[int]: if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Optional[Any] = list(lowerCAmelCase__ ) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 50 , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Union[ImagePipelineOutput, Tuple]: a : Dict = len(lowerCAmelCase__ ) a : Tuple = self.transformer.config.sample_size a : Tuple = self.transformer.config.in_channels a : Optional[int] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase__ , device=self.device , dtype=self.transformer.dtype , ) a : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents a : List[str] = torch.tensor(lowerCAmelCase__ , device=self.device ).reshape(-1 ) a : str = torch.tensor([1000] * batch_size , device=self.device ) a : Dict = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowerCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: a : Any = latent_model_input[: len(lowerCAmelCase__ ) // 2] a : Tuple = torch.cat([half, half] , dim=0 ) a : List[str] = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) a : Dict = t if not torch.is_tensor(lowerCAmelCase__ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) a : Optional[int] = latent_model_input.device.type == "mps" if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Union[str, Any] = torch.floataa if is_mps else torch.floataa else: a : str = torch.intaa if is_mps else torch.intaa a : List[str] = torch.tensor([timesteps] , dtype=lowerCAmelCase__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: a : List[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML a : Dict = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output a : Union[str, Any] = self.transformer( lowerCAmelCase__ , timestep=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).sample # perform guidance if guidance_scale > 1: a, a : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] a, a : Union[str, Any] = torch.split(lowerCAmelCase__ , len(lowerCAmelCase__ ) // 2 , dim=0 ) a : Dict = uncond_eps + guidance_scale * (cond_eps - uncond_eps) a : Optional[int] = torch.cat([half_eps, half_eps] , dim=0 ) a : Optional[int] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: a, a : str = torch.split(lowerCAmelCase__ , lowerCAmelCase__ , dim=1 ) else: a : Any = noise_pred # compute previous image: x_t -> x_t-1 a : Optional[int] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample if guidance_scale > 1: a, a : Tuple = latent_model_input.chunk(2 , dim=0 ) else: a : Tuple = latent_model_input a : Optional[Any] = 1 / self.vae.config.scaling_factor * latents a : Any = self.vae.decode(lowerCAmelCase__ ).sample a : Tuple = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 a : Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a : int = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowerCAmelCase__ )
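The loop above implements classifier-free guidance for DiT: the latents are duplicated, each real class label is paired with the null label 1000, and the two noise predictions are recombined as uncond_eps + guidance_scale * (cond_eps - uncond_eps) before the scheduler step, with the learned-sigma channels split off and passed through unchanged.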
105
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=None , ) -> Union[str, Any]: '''simple docstring''' a__ : Optional[int] = parent a__ : List[str] = batch_size a__ : List[str] = image_size a__ : Dict = patch_size a__ : Optional[Any] = num_channels a__ : List[Any] = is_training a__ : str = use_labels a__ : Dict = hidden_size a__ : Tuple = num_hidden_layers a__ : Tuple = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : List[str] = hidden_dropout_prob a__ : Any = attention_probs_dropout_prob a__ : Dict = type_sequence_label_size a__ : Tuple = initializer_range a__ : Optional[int] = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : List[str] = (image_size // patch_size) ** 2 a__ : Any = num_patches + 1 def __lowercase ( self) -> int: '''simple docstring''' a__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a__ : Tuple = None if self.use_labels: a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__ : List[str] = self.get_config() return config, pixel_values, labels def __lowercase ( self) -> Optional[int]: '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__ : int = ViTMSNModel(config=lowercase) model.to(lowercase) model.eval() a__ : int = model(lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __lowercase ( self , lowercase , lowercase , lowercase) -> Tuple: '''simple docstring''' a__ : Optional[Any] = self.type_sequence_label_size a__ : List[str] = ViTMSNForImageClassification(lowercase) model.to(lowercase) model.eval() a__ : int = model(lowercase , labels=lowercase) print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}') print('Labels: {labels}') self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images a__ : List[str] = 1 a__ : Optional[int] = 
ViTMSNForImageClassification(lowercase) model.to(lowercase) model.eval() a__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) a__ : Dict = model(lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : List[Any] = self.prepare_config_and_inputs() a__ , a__ , a__ : Optional[Any] = config_and_inputs a__ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Any = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __A : Tuple = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __A : List[str] = False __A : Optional[Any] = False __A : Union[str, Any] = False __A : Any = False def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : Optional[int] = ViTMSNModelTester(self) a__ : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37) def __lowercase ( self) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViTMSN does not use inputs_embeds') def __lowercase ( self) -> Any: '''simple docstring''' pass def __lowercase ( self) -> Tuple: '''simple docstring''' a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Union[str, Any] = model_class(lowercase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) a__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , nn.Linear)) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowercase) a__ : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : str = [*signature.parameters.keys()] a__ : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase) def __lowercase ( self) -> str: '''simple docstring''' a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def __lowercase ( self) -> str: '''simple docstring''' a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase) @slow def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = ViTMSNModel.from_pretrained(lowercase) self.assertIsNotNone(lowercase) def A_ ( ) -> Dict: a__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self) -> List[Any]: '''simple docstring''' return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None @slow def __lowercase ( self) -> List[Any]: '''simple docstring''' torch.manual_seed(2) a__ : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(lowercase) a__ : Any = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Optional[Any] = 
image_processor(images=lowercase , return_tensors='pt').to(lowercase) # forward pass with torch.no_grad(): a__ : Tuple = model(**lowercase) # verify the logits a__ : Union[str, Any] = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowercase) a__ : Any = torch.tensor([-0.08_03, -0.44_54, -0.23_75]).to(lowercase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
99
0
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def __UpperCamelCase ( _A : Dict ) ->str:
    """simple docstring"""
    return EnvironmentCommand()


class _SCREAMING_SNAKE_CASE ( lowerCAmelCase_):
    @staticmethod
    def _snake_case ( _SCREAMING_SNAKE_CASE )-> str:
        lowerCamelCase_ =parser.add_parser("""env""" )
        download_parser.set_defaults(func=__lowerCAmelCase )

    def _snake_case ( self )-> int:
        lowerCamelCase_ =huggingface_hub.__version__

        lowerCamelCase_ ="""not installed"""
        lowerCamelCase_ ="""NA"""
        if is_torch_available():
            import torch

            lowerCamelCase_ =torch.__version__
            lowerCamelCase_ =torch.cuda.is_available()

        lowerCamelCase_ ="""not installed"""
        if is_transformers_available():
            import transformers

            lowerCamelCase_ =transformers.__version__

        lowerCamelCase_ ="""not installed"""
        if is_accelerate_available():
            import accelerate

            lowerCamelCase_ =accelerate.__version__

        lowerCamelCase_ ="""not installed"""
        if is_xformers_available():
            import xformers

            lowerCamelCase_ =xformers.__version__

        lowerCamelCase_ ={
            """`diffusers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})',
            """Huggingface_hub version""": hub_version,
            """Transformers version""": transformers_version,
            """Accelerate version""": accelerate_version,
            """xFormers version""": xformers_version,
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }

        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(__lowerCAmelCase ) )

        return info

    @staticmethod
    def _snake_case ( _SCREAMING_SNAKE_CASE )-> int:
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
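For reference, this is the implementation behind the `env` subcommand of the diffusers CLI (the exact entry-point name is an assumption here, not shown in the row): it collects the versions of diffusers, PyTorch, huggingface_hub, transformers, accelerate and xformers and prints a block meant to be pasted into a GitHub issue, leaving the two `<fill in>` entries for the reporter to complete.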
358
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__A : Union[str, Any] = 'src/diffusers'

# Matches is_xxx_available()
__A : Dict = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
__A : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')


__A : Optional[Any] = '\n{0} = None\n'

__A : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'

__A : Tuple = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'


def __UpperCamelCase ( _A : Optional[Any] ) ->Dict:
    """simple docstring"""
    lowerCamelCase_ =_re_backend.findall(_A )
    if len(_A ) == 0:
        return None

    return "_and_".join(_A )


def __UpperCamelCase ( ) ->Optional[int]:
    """simple docstring"""
    with open(os.path.join(_A , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowerCamelCase_ =f.readlines()

    # Get to the point we do the actual imports for type checking
    lowerCamelCase_ =0
    lowerCamelCase_ ={}
    # Go through the end of the file
    while line_index < len(_A ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        lowerCamelCase_ =find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1

            lowerCamelCase_ =[]
            # Until we unindent, add backend objects to the list
            while line_index < len(_A ) and len(lines[line_index] ) > 1:
                lowerCamelCase_ =lines[line_index]
                lowerCamelCase_ =_re_single_line_import.search(_A )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1

            if len(_A ) > 0:
                lowerCamelCase_ =objects
        else:
            line_index += 1

    return backend_specific_objects


def __UpperCamelCase ( _A : Union[str, Any] , _A : int ) ->Optional[Any]:
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(_A )
    elif name.islower():
        return DUMMY_FUNCTION.format(_A , _A )
    else:
        return DUMMY_CLASS.format(_A , _A )


def __UpperCamelCase ( _A : Any=None ) ->Any:
    """simple docstring"""
    if backend_specific_objects is None:
        lowerCamelCase_ =read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    lowerCamelCase_ ={}

    for backend, objects in backend_specific_objects.items():
        lowerCamelCase_ ="""[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]"""
        lowerCamelCase_ ="""# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(_A , _A ) for o in objects] )
        lowerCamelCase_ =dummy_file

    return dummy_files


def __UpperCamelCase ( _A : Dict=False ) ->Tuple:
    """simple docstring"""
    lowerCamelCase_ =create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    lowerCamelCase_ ={"""torch""": """pt"""}

    # Locate actual dummy modules and read their content.
    lowerCamelCase_ =os.path.join(_A , """utils""" )
    lowerCamelCase_ ={
        backend: os.path.join(_A , f'dummy_{short_names.get(_A , _A )}_objects.py' )
        for backend in dummy_files.keys()
    }

    lowerCamelCase_ ={}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(_A ):
            with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                lowerCamelCase_ =f.read()
        else:
            lowerCamelCase_ =""""""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py as the main '
                    """__init__ has new objects."""
                )
                with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    f'diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py. Run `make fix-copies` '
                    """to fix this."""
                )


if __name__ == "__main__":
    __A : List[str] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    __A : str = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
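Per the script's own header comment it is meant to be run from the repository root; a typical invocation (paths assumed) looks like:

    python utils/check_dummies.py                      # report inconsistencies
    python utils/check_dummies.py --fix_and_overwrite  # rewrite the dummy files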
49
0
'''simple docstring'''
import argparse

import torch

from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def __UpperCamelCase ( lowercase__ : str, lowercase__ : List[Any], lowercase__ : Dict ):
    '''simple docstring'''
    if gpta_config_file == "":
        __lowercase =GPTaConfig()
    else:
        __lowercase =GPTaConfig.from_json_file(lowercase__ )
    __lowercase =GPTaModel(lowercase__ )

    # Load weights from numpy
    load_tf_weights_in_gpta(lowercase__, lowercase__, lowercase__ )

    # Save pytorch-model
    __lowercase =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    __lowercase =pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict(), lowercase__ )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(lowercase__, 'w', encoding='utf-8' ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--gpt2_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    UpperCAmelCase = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
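A command-line sketch for the converter above; only the flags come from the script's argparse definitions, while the file name and paths are placeholders:

    python convert_gpt2_checkpoint_to_pytorch.py \
        --gpt2_checkpoint_path /path/to/tf_checkpoint \
        --pytorch_dump_folder_path /path/to/pytorch_model \
        --gpt2_config_file /path/to/config.json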
141
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCAmelCase = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } UpperCAmelCase = { '''google/realm-cc-news-pretrained-embedder''': 512, '''google/realm-cc-news-pretrained-encoder''': 512, '''google/realm-cc-news-pretrained-scorer''': 512, '''google/realm-cc-news-pretrained-openqa''': 512, '''google/realm-orqa-nq-openqa''': 512, '''google/realm-orqa-nq-reader''': 512, '''google/realm-orqa-wq-openqa''': 512, '''google/realm-orqa-wq-reader''': 512, } UpperCAmelCase = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-openqa''': 
{'''do_lower_case''': True}, '''google/realm-orqa-wq-reader''': {'''do_lower_case''': True}, } class lowerCAmelCase ( A ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = RealmTokenizer def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : int=None , __lowercase : List[Any]=True , __lowercase : Any="[UNK]" , __lowercase : Union[str, Any]="[SEP]" , __lowercase : Union[str, Any]="[PAD]" , __lowercase : Tuple="[CLS]" , __lowercase : List[Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : Union[str, Any]=None , **__lowercase : int , ): """simple docstring""" super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , ) __lowercase =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __lowercase ) != do_lower_case or normalizer_state.get('strip_accents' , __lowercase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __lowercase ) != tokenize_chinese_chars ): __lowercase =getattr(__lowercase , normalizer_state.pop('type' ) ) __lowercase =do_lower_case __lowercase =strip_accents __lowercase =tokenize_chinese_chars __lowercase =normalizer_class(**__lowercase ) __lowercase =do_lower_case def snake_case ( self : List[str] , __lowercase : Optional[Any] , **__lowercase : Any ): """simple docstring""" __lowercase =PaddingStrategy.MAX_LENGTH __lowercase =text __lowercase =kwargs.pop('text_pair' , __lowercase ) __lowercase =kwargs.pop('return_tensors' , __lowercase ) __lowercase ={ 'input_ids': [], 'attention_mask': [], 'token_type_ids': [], } for idx, candidate_text in enumerate(__lowercase ): if batch_text_pair is not None: __lowercase =batch_text_pair[idx] else: __lowercase =None __lowercase =super().__call__(__lowercase , __lowercase , return_tensors=__lowercase , **__lowercase ) __lowercase =encoded_candidates.get('input_ids' ) __lowercase =encoded_candidates.get('attention_mask' ) __lowercase =encoded_candidates.get('token_type_ids' ) if encoded_input_ids is not None: output_data["input_ids"].append(__lowercase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(__lowercase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(__lowercase ) __lowercase ={key: item for key, item in output_data.items() if len(__lowercase ) != 0} return BatchEncoding(__lowercase , tensor_type=__lowercase ) def snake_case ( self : List[str] , __lowercase : Tuple , __lowercase : Optional[int]=None ): """simple docstring""" __lowercase =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case ( self : List[str] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): """simple docstring""" __lowercase =[self.sep_token_id] __lowercase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case ( self : Dict , __lowercase : str , __lowercase : Optional[str] = None ): """simple docstring""" __lowercase =self._tokenizer.model.save(__lowercase , name=__lowercase ) return 
tuple(__lowercase )
141
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class a_ ( lowerCamelCase ):
    lowercase = """Salesforce/blip-image-captioning-base"""
    lowercase = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    lowercase = """image_captioner"""
    lowercase = AutoModelForVisionaSeq

    lowercase = ["""image"""]
    lowercase = ["""text"""]

    def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        return self.pre_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )

    def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        return self.model.generate(**_SCREAMING_SNAKE_CASE )

    def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """simple docstring"""
        return self.pre_processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )[0].strip()
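Like other PipelineTool subclasses, the captioner is used by calling it: `__call__`, inherited from the base class, chains the three methods shown (preprocess the image, `model.generate`, then `batch_decode` plus `strip`). A usage sketch, with a hypothetical name for the mangled class above:

    tool = ImageCaptioningTool()  # hypothetical name; the row obfuscates it as `a_`
    caption = tool(image)         # PIL image in, English caption string out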
183
'''simple docstring'''
from timeit import timeit


def lowercase__ ( __UpperCamelCase )-> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    UpperCamelCase = 0
    while number:
        number &= number - 1
        result += 1
    return result


def lowercase__ ( __UpperCamelCase )-> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    UpperCamelCase = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def lowercase__ ( )-> None:
    def do_benchmark(__UpperCamelCase ) -> None:
        UpperCamelCase = """import __main__ as z"""
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(__UpperCamelCase ) = }" )
        UpperCamelCase = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=__UpperCamelCase )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCamelCase ) = }" )
        UpperCamelCase = timeit(
            """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" ,
            setup=__UpperCamelCase ,
        )
        print(F"timeit() runs in {timing} seconds" )

    for number in (25, 37, 58, 0):
        do_benchmark(__UpperCamelCase )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
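A short worked trace of the `number &= number - 1` trick benchmarked above; it clears the lowest set bit on each pass, so the loop runs once per set bit (the input value is chosen only for illustration):

    25 = 0b11001  ->  25 & 24 = 0b11000  ->  24 & 23 = 0b10000  ->  16 & 15 = 0

Three iterations, so the Brian Kernighan count of 25 is 3.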
183
1
def lowerCamelCase__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
    """simple docstring"""
    while second != 0:
        lowerCAmelCase_ = first & second
        first ^= second
        lowerCAmelCase_ = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    _A = int(input("Enter the first number: ").strip())
    _A = int(input("Enter the second number: ").strip())
    print(f"""{add(first, second) = }""")
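A worked trace of the carry-propagation loop above, with first=5 (0b101) and second=3 (0b011) as illustrative operands (XOR adds without carries; AND plus a left shift reinjects the carries until none remain):

    carry = 5 & 3 = 1;  first = 5 ^ 3 = 6;  second = 1 << 1 = 2
    carry = 6 & 2 = 2;  first = 6 ^ 2 = 4;  second = 2 << 1 = 4
    carry = 4 & 4 = 4;  first = 4 ^ 4 = 0;  second = 4 << 1 = 8
    carry = 0 & 8 = 0;  first = 0 ^ 8 = 8;  second = 0   ->  returns 8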
231
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_A = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    _A = _LazyModule(__name__, globals()["__file__"], _import_structure)
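The module above follows the standard transformers lazy-import layout: at import time only the `_import_structure` dictionary is built, the `TYPE_CHECKING` branch gives static type checkers the real imports, and at runtime the module object is replaced by a `_LazyModule` that performs the actual submodule import on first attribute access, so `from transformers.models.xglm import XGLMModel` only loads `modeling_xglm` (and therefore torch) at that point.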
231
1
"""simple docstring""" import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase_ ( __a ): '''simple docstring''' __snake_case = '''facebook/bart-large-mnli''' __snake_case = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. ''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) __snake_case = '''text_classifier''' __snake_case = AutoTokenizer __snake_case = AutoModelForSequenceClassification __snake_case = ['''text''', ['''text''']] __snake_case = ['''text'''] def __lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" super().setup() a = self.model.config a = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): a = int(UpperCamelCase__ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any ) ->Any: """simple docstring""" a = labels return self.pre_processor( [text] * len(UpperCamelCase__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , ) def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Any ) ->Union[str, Any]: """simple docstring""" a = outputs.logits a = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
368
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


UpperCAmelCase__ = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
0
from __future__ import annotations


def lowerCAmelCase__ ( a__: dict , a__: str ) -> set[str]:
    '''simple docstring'''
    _UpperCAmelCase , _UpperCAmelCase = set(a__ ), [start]
    while stack:
        _UpperCAmelCase = stack.pop()
        explored.add(a__ )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(a__ )
    return explored


lowerCAmelCase__ :Tuple = {
    '''A''': ['''B''', '''C''', '''D'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F'''],
    '''D''': ['''B''', '''D'''],
    '''E''': ['''B''', '''F'''],
    '''F''': ['''C''', '''E''', '''G'''],
    '''G''': ['''F'''],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, '''A'''))
329
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
329
1
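The box targets asserted in the DeformableDetr test above use DETR's normalized (center_x, center_y, width, height) format. A minimal sketch of converting one of them back to absolute pixel corners; the helper name is illustrative, and the 640x480 size comes from the COCO fixture used in the test:

def to_corners(cx, cy, w, h, img_w, img_h):
    # normalized (cx, cy, w, h) -> absolute (x0, y0, x1, y1)
    return (
        (cx - w / 2) * img_w,
        (cy - h / 2) * img_h,
        (cx + w / 2) * img_w,
        (cy + h / 2) * img_h,
    )


# first expected box from the test above, on the 640x480 COCO image
print(to_corners(0.5503, 0.2765, 0.0604, 0.2215, 640, 480))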
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Tuple=13 , __a : str=64 , __a : List[Any]=2 , __a : int=3 , __a : Union[str, Any]=True , __a : Tuple=True , __a : Tuple=32 , __a : Dict=5 , __a : str=4 , __a : List[str]=37 , __a : int="gelu" , __a : Union[str, Any]=0.1 , __a : List[Any]=0.1 , __a : Optional[int]=10 , __a : int=0.02 , __a : List[str]=[1, 16, 4, 4] , __a : Tuple=None , ) -> int: """simple docstring""" __lowercase : Union[str, Any] = parent __lowercase : str = batch_size __lowercase : Tuple = image_size __lowercase : Any = patch_size __lowercase : int = num_channels __lowercase : List[str] = is_training __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = hidden_size __lowercase : Tuple = num_hidden_layers __lowercase : List[str] = num_attention_heads __lowercase : Optional[Any] = intermediate_size __lowercase : Optional[int] = hidden_act __lowercase : str = hidden_dropout_prob __lowercase : Tuple = attention_probs_dropout_prob __lowercase : Optional[Any] = type_sequence_label_size __lowercase : Dict = initializer_range __lowercase : Optional[Any] = scope __lowercase : int = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size __lowercase : List[Any] = (self.image_size // 32) ** 2 __lowercase : Tuple = num_patches + 1 def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : int = None if self.use_labels: __lowercase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase : str = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [4, 8, 16, 32], """num_groups""": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , ) def lowerCAmelCase ( self : List[Any] , __a : Any , __a : List[Any] , __a : Tuple ) -> str: """simple docstring""" __lowercase : str = ViTHybridModel(config=__a ) model.to(__a ) model.eval() __lowercase : Any = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : List[str] , __a : int , __a : Any , __a : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = self.type_sequence_label_size __lowercase : List[str] = ViTHybridForImageClassification(__a ) model.to(__a ) model.eval() __lowercase : Tuple = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" __lowercase : Optional[int] = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase : List[Any] = config_and_inputs __lowercase : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Dict = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () _A : Optional[int] = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) _A : Any = False _A : List[str] = False _A : Any = False def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Dict = ViTHybridModelTester(self ) __lowercase : Optional[int] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCAmelCase ( self : int ) -> int: """simple docstring""" pass def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase , __lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Union[str, Any] = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowercase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase , __lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[int] = model_class(__a ) __lowercase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: 
"""simple docstring""" __lowercase , __lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : List[Any] = _config_zero_init(__a ) for model_class in self.all_model_classes: __lowercase : Union[str, Any] = model_class(config=__a ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": __lowercase : Optional[Any] = [F"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : Optional[Any] = ViTHybridModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case_ ( ): __lowercase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Dict = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __a ) __lowercase : Union[str, Any] = self.default_image_processor __lowercase : List[str] = prepare_img() __lowercase : str = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): __lowercase : Any = model(**__a ) # verify the logits __lowercase : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __lowercase : Optional[int] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) ) @slow @require_accelerate def lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" __lowercase : List[str] = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" ) __lowercase : List[str] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" ) __lowercase : int = prepare_img() __lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ) __lowercase : Any = model(**__a ) __lowercase : Dict = outputs.logits # model predicts one of the 1000 ImageNet classes __lowercase : str = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
306
def snake_case_ ( string_a : str , string_b : str ):
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )

    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
306
1
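A quick sanity check for the Hamming-distance helper in the style context above, assuming the corrected signature snake_case_(string_a, string_b):

assert snake_case_("karolin", "kathrin") == 3
assert snake_case_("0000", "1111") == 4
try:
    snake_case_("abc", "ab")
except ValueError as err:
    print(err)  # String lengths must match!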
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _lowerCamelCase : str = "platform" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class SCREAMING_SNAKE_CASE : """simple docstring""" _SCREAMING_SNAKE_CASE = PegasusConfig _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = """gelu""" def __init__( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=1_3 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : str=9_9 , UpperCamelCase__ : Union[str, Any]=3_2 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : List[Any]=3_7 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=2_0 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : int=0 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = eos_token_id UpperCamelCase = pad_token_id UpperCamelCase = bos_token_id def A ( self : Any ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, inputs_dict def A ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = 2_0 UpperCamelCase = model_class_name(UpperCamelCase__ ) UpperCamelCase = model.encode(inputs_dict['input_ids'] ) UpperCamelCase , UpperCamelCase = ( 
inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , ) UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase__ , ) UpperCamelCase = model.decode(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def A ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = 2_0 UpperCamelCase = model_class_name(UpperCamelCase__ ) UpperCamelCase = model.encode(inputs_dict['input_ids'] ) UpperCamelCase , UpperCamelCase = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) UpperCamelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCamelCase = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , ) UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) UpperCamelCase = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , ) UpperCamelCase = model.decode(UpperCamelCase__ , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ ) UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def __lowerCamelCase ( A__ , A__ , A__ , A__=None , A__=None , ) -> Optional[int]: """simple docstring""" if attention_mask is None: UpperCamelCase = np.not_equal(A__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: UpperCamelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if 
is_flax_available() else () ) _SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def A ( self : str ): """simple docstring""" UpperCamelCase = FlaxPegasusModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ ) def A ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : List[Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = model_class(UpperCamelCase__ ) @jax.jit def encode_jitted(UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : Optional[int] ): return model.encode(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) with self.subTest('JIT Enabled' ): UpperCamelCase = encode_jitted(**UpperCamelCase__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): UpperCamelCase = encode_jitted(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def A ( self : List[str] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = model_class(UpperCamelCase__ ) UpperCamelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) UpperCamelCase = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ): return model.decode( decoder_input_ids=UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , encoder_outputs=UpperCamelCase__ , ) with self.subTest('JIT Enabled' ): UpperCamelCase = decode_jitted(**UpperCamelCase__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): UpperCamelCase = decode_jitted(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def A ( self : Any ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained('google/pegasus-large' , from_pt=UpperCamelCase__ ) UpperCamelCase = np.ones((1, 1) ) UpperCamelCase = model(UpperCamelCase__ ) 
self.assertIsNotNone(UpperCamelCase__ ) @slow def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) UpperCamelCase = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) UpperCamelCase = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] UpperCamelCase = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] UpperCamelCase = tokenizer(UpperCamelCase__ , return_tensors='np' , truncation=UpperCamelCase__ , max_length=5_1_2 , padding=UpperCamelCase__ ) UpperCamelCase = model.generate(**UpperCamelCase__ , num_beams=2 ).sequences UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) assert tgt_text == decoded
28
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _A ( __UpperCAmelCase ): def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = eval_examples __a = post_process_function def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = gen_kwargs.copy() __a = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length ) __a = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams ) __a = gen_kwargs __a = self.eval_dataset if eval_dataset is None else eval_dataset __a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE) __a = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) else: __a = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report()) __a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE) return metrics def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = gen_kwargs.copy() __a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE) # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''') __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
49
0
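The decoder attention mask built in prepare_pegasus_inputs_dict above can be checked in isolation; a small numpy sketch, with pad_token_id assumed to be 0 for illustration:

import numpy as np

pad_token_id = 0  # assumed for illustration
decoder_input_ids = np.array([[2, 5, 7, 0, 0]])
# The first position is always attended; later positions mask out padding,
# mirroring the np.concatenate construction in the code above.
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask)  # [[1 1 1 0 0]]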
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True )
class A ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'summary': Value('string' )} )
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {self.text_column: "text", self.summary_column: "summary"}
167
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class A ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def run( self ):
        """simple docstring"""
        raise NotImplementedError()
167
1
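The frozen-dataclass pattern in the summarization template above generalizes to other tasks. A hypothetical sketch for a paraphrase-style template mirroring the same field layout; the class and column names are invented for illustration and are not part of the library:

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class ParaphraseTemplate(TaskTemplate):  # hypothetical, not a library class
    task: str = field(default="paraphrase", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"paraphrase": Value("string")})
    text_column: str = "text"
    paraphrase_column: str = "paraphrase"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.paraphrase_column: "paraphrase"}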
"""simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = [ '''Audio''', '''Array2D''', '''Array3D''', '''Array4D''', '''Array5D''', '''ClassLabel''', '''Features''', '''Sequence''', '''Value''', '''Image''', '''Translation''', '''TranslationVariableLanguages''', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
183
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _SCREAMING_SNAKE_CASE : Union[str, Any] = '''CompVis/stable-diffusion-v1-1''' _SCREAMING_SNAKE_CASE : Optional[Any] = '''CompVis/stable-diffusion-v1-2''' _SCREAMING_SNAKE_CASE : int = '''CompVis/stable-diffusion-v1-3''' _SCREAMING_SNAKE_CASE : str = '''CompVis/stable-diffusion-v1-4''' class a ( __snake_case ): def __init__( self : int , __SCREAMING_SNAKE_CASE : AutoencoderKL , __SCREAMING_SNAKE_CASE : CLIPTextModel , __SCREAMING_SNAKE_CASE : CLIPTokenizer , __SCREAMING_SNAKE_CASE : UNetaDConditionModel , __SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , __SCREAMING_SNAKE_CASE : CLIPImageProcessor , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]: super()._init_() lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = StableDiffusionPipeline( vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , requires_safety_checker=__SCREAMING_SNAKE_CASE , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCamelCase ( self : List[str] ) -> Dict[str, Any]: return {k: getattr(self , __SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith('_' )} def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ) -> Any: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Any ) -> List[Any]: self.enable_attention_slicing(__SCREAMING_SNAKE_CASE ) @torch.no_grad() def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : int , ) -> Tuple: return self.pipea( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , 
guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) @torch.no_grad() def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Optional[int]: return self.pipea( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) @torch.no_grad() def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Tuple: return self.pipea( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) @torch.no_grad() def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : 
Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple: return self.pipea( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) @torch.no_grad() def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : int , ) -> str: lowerCamelCase_ = 'cuda' if torch.cuda.is_available() else 'cpu' self.to(__SCREAMING_SNAKE_CASE ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCamelCase_ = self.textaimg_sda_a( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCamelCase_ = self.textaimg_sda_a( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCamelCase_ = self.textaimg_sda_a( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , 
negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCamelCase_ = self.textaimg_sda_a( prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
183
1
'''simple docstring'''
import functools
from typing import Any


def _UpperCamelCase ( string: str , words: list[str] ) -> bool:
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("""the string should be not empty string""" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("""the words should be a list of non-empty strings""" )

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = """WORD_KEEPER"""
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string )

    # Dynamic programming method
    @functools.cache
    def is_breakable( index: int ) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False

    return is_breakable(0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
283
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType __A , __A , __A =False, False, False @dataclass class _snake_case : lowerCAmelCase :Optional[int] = None lowerCAmelCase :bool = True lowerCAmelCase :bool = True lowerCAmelCase :Optional[str] = None # Automatically constructed lowerCAmelCase :ClassVar[str] = "dict" lowerCAmelCase :ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) lowerCAmelCase :str = field(default='''Audio''' , init=a__ , repr=a__ ) def __call__( self): return self.pa_type def snake_case__ ( self , _lowerCamelCase): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("""To support encoding audio data, please install 'soundfile'.""") from err if isinstance(_lowerCamelCase , _lowerCamelCase): return {"bytes": None, "path": value} elif isinstance(_lowerCamelCase , _lowerCamelCase): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase__ : Optional[int] = BytesIO() sf.write(_lowerCamelCase , value["""array"""] , value["""sampling_rate"""] , format="""wav""") return {"bytes": buffer.getvalue(), "path": None} elif value.get("""path""") is not None and os.path.isfile(value["""path"""]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("""pcm"""): # "PCM" only has raw audio bytes if value.get("""sampling_rate""") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""") if value.get("""bytes"""): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase__ : Tuple = np.frombuffer(value["""bytes"""] , dtype=np.intaa).astype(np.floataa) / 3_2767 else: UpperCAmelCase__ : List[str] = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""").astype(np.floataa) / 3_2767 UpperCAmelCase__ : List[str] = BytesIO(bytes()) sf.write(_lowerCamelCase , _lowerCamelCase , value["""sampling_rate"""] , format="""wav""") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("""path""")} elif value.get("""bytes""") is not None or value.get("""path""") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("""bytes"""), "path": value.get("""path""")} else: raise ValueError( f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''') def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.""") UpperCAmelCase__ , UpperCAmelCase__ : Tuple = (value["""path"""], BytesIO(value["""bytes"""])) if value["""bytes"""] is not None else (value["""path"""], None) if path is None and file is None: raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''') try: import librosa import soundfile as sf except ImportError as err: raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""") from err UpperCAmelCase__ : Dict = xsplitext(_lowerCamelCase)[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( """Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( """Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """) if file is None: UpperCAmelCase__ : int = token_per_repo_id or {} UpperCAmelCase__ : Optional[int] = path.split("""::""")[-1] try: UpperCAmelCase__ : Dict = string_to_dict(_lowerCamelCase , config.HUB_DATASETS_URL)["""repo_id"""] UpperCAmelCase__ : Dict = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase__ : List[Any] = None with xopen(_lowerCamelCase , """rb""" , use_auth_token=_lowerCamelCase) as f: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = sf.read(_lowerCamelCase) else: UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = sf.read(_lowerCamelCase) UpperCAmelCase__ : str = array.T if self.mono: UpperCAmelCase__ : List[Any] = librosa.to_mono(_lowerCamelCase) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase__ : int = librosa.resample(_lowerCamelCase , orig_sr=_lowerCamelCase , target_sr=self.sampling_rate) UpperCAmelCase__ : Tuple = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def snake_case__ ( self): from .features import Value if self.decode: raise ValueError("""Cannot flatten a decoded Audio feature.""") return { "bytes": Value("""binary"""), "path": Value("""string"""), } def snake_case__ ( self , _lowerCamelCase): if pa.types.is_string(storage.type): UpperCAmelCase__ : Dict = pa.array([None] * len(_lowerCamelCase) , type=pa.binary()) UpperCAmelCase__ : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null()) elif pa.types.is_binary(storage.type): UpperCAmelCase__ : Optional[int] = pa.array([None] * len(_lowerCamelCase) , type=pa.string()) UpperCAmelCase__ : str = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("""array"""): UpperCAmelCase__ : Optional[Any] = pa.array([Audio().encode_example(_lowerCamelCase) if x is not None else None for x in storage.to_pylist()]) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("""bytes""") >= 0: UpperCAmelCase__ : int = storage.field("""bytes""") else: UpperCAmelCase__ : List[str] = pa.array([None] * len(_lowerCamelCase) , type=pa.binary()) if storage.type.get_field_index("""path""") >= 0: UpperCAmelCase__ : List[Any] = storage.field("""path""") else: UpperCAmelCase__ : Optional[int] = pa.array([None] * len(_lowerCamelCase) , type=pa.string()) 
UpperCAmelCase__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null()) return array_cast(_lowerCamelCase , self.pa_type) def snake_case__ ( self , _lowerCamelCase): @no_op_if_value_is_null def path_to_bytes(_lowerCamelCase): with xopen(_lowerCamelCase , """rb""") as f: UpperCAmelCase__ : int = f.read() return bytes_ UpperCAmelCase__ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""]) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCAmelCase__ : Optional[Any] = pa.array( [os.path.basename(_lowerCamelCase) if path is not None else None for path in storage.field("""path""").to_pylist()] , type=pa.string() , ) UpperCAmelCase__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null()) return array_cast(_lowerCamelCase , self.pa_type)
283
1
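Two quick checks of the trie-based word-break function above, assuming the corrected signature _UpperCamelCase(string, words):

print(_UpperCamelCase("applepenapple", ["apple", "pen"]))  # True
print(_UpperCamelCase("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False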
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
84
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    @slow
    def a__ ( self ) -> Any:
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
26
0
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class A_ ( _a ): def __init__( self: Tuple ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: Any ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Optional[int] = field _lowerCamelCase : Dict = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Dict = Json( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,field=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: List[str] ): '''simple docstring''' if self.streaming: _lowerCamelCase : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Any = None _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : Tuple = None _lowerCamelCase : int = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Optional[Any] = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: str ,): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _lowerCamelCase : List[Any] = dataset _lowerCamelCase : Optional[Any] = path_or_buf _lowerCamelCase : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _lowerCamelCase : Dict = num_proc _lowerCamelCase : List[Any] = "utf-8" _lowerCamelCase : Any = to_json_kwargs def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = self.to_json_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self.to_json_kwargs.pop("orient" ,"records" ) _lowerCamelCase : Optional[Any] = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False ) _lowerCamelCase : Union[str, Any] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True ) _lowerCamelCase : int = self.to_json_kwargs.pop("compression" ,__lowerCAmelCase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf ,"wb" ,compression=__lowerCAmelCase ) as buffer: _lowerCamelCase : Union[str, Any] = 
self._write(file_obj=__lowerCAmelCase ,orient=__lowerCAmelCase ,lines=__lowerCAmelCase ,index=__lowerCAmelCase ,**self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" " was passed. Please provide a local path instead." ) _lowerCamelCase : List[str] = self._write( file_obj=self.path_or_buf ,orient=__lowerCAmelCase ,lines=__lowerCAmelCase ,index=__lowerCAmelCase ,**self.to_json_kwargs ) return written def _lowercase ( self: int ,__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = args _lowerCamelCase : Optional[Any] = query_table( table=self.dataset.data ,key=slice(__lowerCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,) _lowerCamelCase : str = batch.to_pandas().to_json( path_or_buf=__lowerCAmelCase ,orient=__lowerCAmelCase ,lines=__lowerCAmelCase ,index=__lowerCAmelCase ,**__lowerCAmelCase ) if not json_str.endswith("\n" ): json_str += "\n" return json_str.encode(self.encoding ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Dict ,): '''simple docstring''' _lowerCamelCase : Dict = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,): _lowerCamelCase : int = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__lowerCAmelCase ) else: _lowerCamelCase, _lowerCamelCase : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,__lowerCAmelCase ,__lowerCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,): written += file_obj.write(__lowerCAmelCase ) return written
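# Editorial usage sketch for the JSON reader/writer pair above: in the upstream
# `datasets` library these classes sit behind the public API shown below; the file
# names are hypothetical.
#
# from datasets import load_dataset
# ds = load_dataset("json", data_files="train.json", split="train")  # reader path
# ds.to_json("train_out.jsonl", lines=True, num_proc=2)              # writer path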
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class __UpperCAmelCase : def __init__( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: int=None , UpperCAmelCase_: Any="resnet50" , UpperCAmelCase_: str=3 , UpperCAmelCase_: Union[str, Any]=32 , UpperCAmelCase_: str=3 , UpperCAmelCase_: Any=True , UpperCAmelCase_: int=True , ): '''simple docstring''' _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = out_indices if out_indices is not None else [4] _SCREAMING_SNAKE_CASE = stage_names _SCREAMING_SNAKE_CASE = out_features _SCREAMING_SNAKE_CASE = backbone _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = use_pretrained_backbone _SCREAMING_SNAKE_CASE = is_training def UpperCamelCase ( self: List[str] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = TimmBackbone(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs _SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): __snake_case : int = (TimmBackbone,) if is_torch_available() else () __snake_case : Tuple = {"feature-extraction": TimmBackbone} if is_torch_available() else {} __snake_case : Union[str, Any] = False __snake_case : int = False __snake_case : Union[str, Any] = False __snake_case : Optional[Any] = False def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = TimmBackboneModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ ) def UpperCamelCase ( self: List[str] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self: List[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = """resnet18""" _SCREAMING_SNAKE_CASE = """microsoft/resnet-18""" _SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(UpperCAmelCase_ , use_timm_backbone=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(UpperCAmelCase_ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) _SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(UpperCAmelCase_ , use_timm_backbone=UpperCAmelCase_ , out_indices=[1, 2, 3] ) _SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(UpperCAmelCase_ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' pass @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' pass @unittest.skip("""TimmBackbone initialization is managed on the timm side""" ) def UpperCamelCase ( self: List[Any] ): '''simple docstring''' pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def UpperCamelCase ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" ) def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" ) def UpperCamelCase ( self: str ): '''simple docstring''' pass @unittest.skip("""TimmBackbone doesn't support output_attentions.""" ) def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' pass 
@unittest.skip("""Safetensors is not supported by timm.""" ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' pass def UpperCamelCase ( self: Tuple ): '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = self.has_attentions # no need to test all models as different heads yield the same functionality _SCREAMING_SNAKE_CASE = self.all_model_classes[0] _SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = outputs[0][-1] # Encoder-/Decoder-only models _SCREAMING_SNAKE_CASE = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: _SCREAMING_SNAKE_CASE = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=UpperCAmelCase_ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def UpperCamelCase ( self: Dict ): '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None _SCREAMING_SNAKE_CASE = copy.deepcopy(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights _SCREAMING_SNAKE_CASE = copy.deepcopy(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ )
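# Editorial usage sketch mirroring the timm/transformers equivalence test above
# (checkpoints as used in that test: "resnet18" on the timm side, "microsoft/resnet-18"
# on the transformers side):
#
# from transformers import AutoBackbone
# timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
# hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])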
306
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]: """simple docstring""" assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' _SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' _SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ ) def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = np.asarray(weights[0] ) _SCREAMING_SNAKE_CASE = np.asarray(weights[1] ) _SCREAMING_SNAKE_CASE = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,) set_param( torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,) set_param( torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,) def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = np.asarray(weights[0] ) _SCREAMING_SNAKE_CASE = np.asarray(weights[1] ) _SCREAMING_SNAKE_CASE = np.asarray(weights[2] ) _SCREAMING_SNAKE_CASE = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,) set_param( torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,) set_param( torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,) set_param( torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,) def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = weights[0][0][0] _SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] ) _SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,) # lsh weights + output _SCREAMING_SNAKE_CASE = weights[0][1] if len(snake_case__ ) < 4: set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ ) else: set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ ) # intermediate weighs _SCREAMING_SNAKE_CASE = weights[2][0][1][2] # Chunked Feed Forward if len(snake_case__ ) == 4: _SCREAMING_SNAKE_CASE = intermediate_weights[2] # layernorm 2 _SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] ) _SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,) # intermediate dense _SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] ) _SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,) # intermediate out _SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] ) _SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] ) set_param( 
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,) def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = torch_model.reformer # word embeds _SCREAMING_SNAKE_CASE = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings ,torch.tensor(snake_case__ ) ,) if isinstance(weights[3] ,snake_case__ ): _SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' _SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(snake_case__ ) ) _SCREAMING_SNAKE_CASE = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( snake_case__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(snake_case__ ,snake_case__ ,snake_case__ ) # output layer norm _SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] ) _SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,) # output embeddings _SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] ) _SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,) def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) _SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(snake_case__ ) with open(snake_case__ ,"""rb""" ) as f: _SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )["""weights"""] set_model_weights_in_torch(snake_case__ ,snake_case__ ,config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() ,snake_case__ ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
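# Editorial example invocation for the conversion script above; the flags are the ones
# declared in its argparse block, while the script file name and the local paths are
# hypothetical:
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin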
306
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A__ : Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys A__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
209
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast A__ : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class lowercase__ ( datasets.BuilderConfig ): _UpperCAmelCase :int = 10000 _UpperCAmelCase :Optional[List[str]] = None _UpperCAmelCase :Optional[datasets.Features] = None class lowercase__ ( datasets.ArrowBasedBuilder ): _UpperCAmelCase :Optional[int] = ParquetConfig def UpperCAmelCase__ ( self : Optional[int] ): return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[Any] ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCamelCase_ : str =dl_manager.download_and_extract(self.config.data_files ) if isinstance(snake_case__ , (str, list, tuple) ): lowerCamelCase_ : Dict =data_files if isinstance(snake_case__ , snake_case__ ): lowerCamelCase_ : List[Any] =[files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCamelCase_ : Any =[dl_manager.iter_files(snake_case__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] lowerCamelCase_ : Optional[int] =[] for split_name, files in data_files.items(): if isinstance(snake_case__ , snake_case__ ): lowerCamelCase_ : Optional[int] =[files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCamelCase_ : int =[dl_manager.iter_files(snake_case__ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(snake_case__ ): with open(snake_case__ , "rb" ) as f: lowerCamelCase_ : List[Any] =datasets.Features.from_arrow_schema(pq.read_schema(snake_case__ ) ) break splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={"files": files} ) ) return splits def UpperCAmelCase__ ( self : int , snake_case__ : pa.Table ): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example lowerCamelCase_ : List[str] =table_cast(snake_case__ , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : int , snake_case__ : Optional[Any] ): lowerCamelCase_ : Tuple =self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ): with open(snake_case__ , "rb" ) as f: lowerCamelCase_ : List[str] =pq.ParquetFile(snake_case__ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): lowerCamelCase_ : Union[str, Any] =pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F"""{file_idx}_{batch_idx}""", 
self._cast_table(snake_case__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" ) raise
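# Editorial usage sketch: this builder backs the packaged "parquet" loader in
# `datasets`, normally reached through the public API below (file name hypothetical).
#
# from datasets import load_dataset
# ds = load_dataset("parquet", data_files="data.parquet", split="train")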
209
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowercase ( unittest.TestCase): def __init__( self : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=7 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Optional[int]=18 , _lowerCamelCase : Dict=30 , _lowerCamelCase : Union[str, Any]=4_00 , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : List[str]=None , _lowerCamelCase : Tuple=True , ): """simple docstring""" A_ : List[str] = size if size is not None else {'''height''': 18, '''width''': 18} A_ : Any = parent A_ : Any = batch_size A_ : str = num_channels A_ : Any = image_size A_ : Any = min_resolution A_ : Tuple = max_resolution A_ : List[str] = do_resize A_ : Optional[Any] = size A_ : Optional[Any] = apply_ocr def a_ ( self : List[Any] ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowercase ( __UpperCAmelCase , unittest.TestCase): __lowerCAmelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def a_ ( self : Dict ): """simple docstring""" A_ : Dict = LayoutLMvaImageProcessingTester(self ) @property def a_ ( self : Optional[int] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self : Union[str, Any] ): """simple docstring""" A_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''apply_ocr''' ) ) def a_ ( self : Any ): """simple docstring""" A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def a_ ( self : Union[str, Any] ): """simple docstring""" pass def a_ ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , _lowerCamelCase ) self.assertIsInstance(encoding.boxes , _lowerCamelCase ) # Test batched A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def a_ 
( self : Any ): """simple docstring""" A_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A_ : int = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def a_ ( self : Optional[int] ): """simple docstring""" A_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def a_ ( self : Union[str, Any] ): """simple docstring""" A_ : List[Any] = LayoutLMvaImageProcessor() from datasets import load_dataset A_ : List[str] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) A_ : Any = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 A_ : int = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', 
'''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 A_ : Dict = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], 
[6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _lowerCamelCase ) self.assertListEqual(encoding.boxes , _lowerCamelCase ) # with apply_OCR = False A_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
167
"""simple docstring""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowercase ( __UpperCAmelCase , __UpperCAmelCase): __lowerCAmelCase : List[Any] = 1 @register_to_config def __init__( self : Union[str, Any] , _lowerCamelCase : int = 10_00 , _lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None ): """simple docstring""" self.set_timesteps(_lowerCamelCase ) # standard deviation of the initial noise distribution A_ : Optional[Any] = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. A_ : List[str] = 4 # running values A_ : Optional[int] = [] def a_ ( self : str , _lowerCamelCase : int , _lowerCamelCase : Union[str, torch.device] = None ): """simple docstring""" A_ : Tuple = num_inference_steps A_ : Tuple = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] A_ : Optional[Any] = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: A_ : str = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: A_ : int = torch.sin(steps * math.pi / 2 ) ** 2 A_ : Dict = (1.0 - self.betas**2) ** 0.5 A_ : Tuple = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] A_ : int = timesteps.to(_lowerCamelCase ) A_ : int = [] def a_ ( self : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = True , ): """simple docstring""" if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) A_ : Union[str, Any] = (self.timesteps == timestep).nonzero().item() A_ : Dict = timestep_index + 1 A_ : Union[str, Any] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(_lowerCamelCase ) if len(self.ets ) == 1: A_ : Dict = self.ets[-1] elif len(self.ets ) == 2: A_ : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: A_ : Optional[Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: A_ : str = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) A_ : Union[str, Any] = self._get_prev_sample(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_lowerCamelCase ) def a_ ( self : Any , _lowerCamelCase : torch.FloatTensor , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[Any] ): """simple docstring""" return sample def a_ ( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Optional[Any] = self.alphas[timestep_index] A_ : Union[str, Any] = self.betas[timestep_index] A_ : int = self.alphas[prev_timestep_index] A_ : Tuple = self.betas[prev_timestep_index] A_ : str = (sample - sigma * ets) / max(_lowerCamelCase , 1E-8 ) A_ : List[str] = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : List[str] ): """simple docstring""" return self.config.num_train_timesteps
167
1
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments UpperCamelCase_ = logging.getLogger(__name__) @dataclass class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Optional[float] = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) A : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to SortishSamler or not.'''} ) A : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) A : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''whether to use adafactor'''} ) A : Optional[float] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) A : Optional[float] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) A : Optional[float] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) A : Optional[float] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) A : Optional[str] = field( default='''linear''' , metadata={'''help''': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
358
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
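# Editorial usage example for the classes above (the distinct class names were restored
# editorially from the type hints; the original record reused one mangled name for both):
#
# tree = Node(10)
# tree.left = Node(5)
# tree.right = Node(-3)
# list(BinaryTreeNodeSum(tree))  # -> [12]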
246
0
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Tuple = ["input_values", "padding_mask"] def __init__( self , __A = 1 , __A = 2_4000 , __A = 0.0 , __A = None , __A = None , **__A , ): """simple docstring""" super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A ) lowerCamelCase : List[Any] = chunk_length_s lowerCamelCase : str = overlap @property def _snake_case ( self ): """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _snake_case ( self ): """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , __A , __A = None , __A = False , __A = None , __A = None , __A = None , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." 
) elif padding is None: # by default let's pad the inputs lowerCamelCase : int = True lowerCamelCase : int = bool( isinstance(__A , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase : List[Any] = [np.asarray(__A , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__A , np.ndarray ): lowerCamelCase : Optional[Any] = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowerCamelCase : Union[str, Any] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase : List[str] = [np.asarray(__A ).T] # verify inputs are valid for idx, example in enumerate(__A ): if example.ndim > 2: raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" ) lowerCamelCase : List[Any] = None lowerCamelCase : Tuple = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowerCamelCase : Optional[Any] = min(array.shape[0] for array in raw_audio ) lowerCamelCase : str = int(np.floor(max_length / self.chunk_stride ) ) lowerCamelCase : str = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowerCamelCase : str = max(array.shape[0] for array in raw_audio ) lowerCamelCase : Dict = int(np.ceil(max_length / self.chunk_stride ) ) lowerCamelCase : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowerCamelCase : Dict = "max_length" else: lowerCamelCase : Union[str, Any] = input_values # normal padding on batch if padded_inputs is None: lowerCamelCase : str = self.pad( __A , max_length=__A , truncation=__A , padding=__A , return_attention_mask=__A , ) if padding: lowerCamelCase : Dict = padded_inputs.pop("attention_mask" ) lowerCamelCase : Dict = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: lowerCamelCase : Any = example[..., None] input_values.append(example.T ) lowerCamelCase : int = input_values if return_tensors is not None: lowerCamelCase : List[Any] = padded_inputs.convert_to_tensors(__A ) return padded_inputs
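# Editorial usage sketch: upstream this feature extractor is transformers'
# EncodecFeatureExtractor, so the example uses that public name; the audio array is
# synthetic.
#
# import numpy as np
# from transformers import EncodecFeatureExtractor
# extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
# audio = np.zeros(24000, dtype=np.float32)  # one second of silence
# features = extractor(audio, sampling_rate=24000, return_tensors="np")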
283
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = "openai/whisper-base" __A : str = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __A : Any = "transcriber" __A : Any = WhisperProcessor __A : int = WhisperForConditionalGeneration __A : Any = ["audio"] __A : List[str] = ["text"] def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor(__A , return_tensors="pt" ).input_features def _snake_case ( self , __A ): """simple docstring""" return self.model.generate(inputs=__A ) def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
283
1
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowerCAmelCase ( __snake_case ): lowerCamelCase_ : Optional[int] = ['image_processor', 'tokenizer'] lowerCamelCase_ : Dict = 'CLIPImageProcessor' lowerCamelCase_ : Optional[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self , __magic_name__=None , __magic_name__=None , **__magic_name__ ) -> Tuple: '''simple docstring''' snake_case_ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __magic_name__ , ) snake_case_ : str = kwargs.pop('''feature_extractor''' ) snake_case_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__magic_name__ , __magic_name__ ) def __call__(self , __magic_name__=None , __magic_name__=None , __magic_name__=None , **__magic_name__ ) -> Optional[Any]: '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: snake_case_ : Union[str, Any] = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if images is not None: snake_case_ : List[Any] = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if text is not None and images is not None: snake_case_ : Any = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ ) def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int: '''simple docstring''' return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ ) def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Optional[int]: '''simple docstring''' return self.tokenizer.decode(*__magic_name__ , **__magic_name__ ) @property def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : List[Any] = self.tokenizer.model_input_names snake_case_ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCamelCase (self ) -> Tuple: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __magic_name__ , ) return self.image_processor_class @property def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __magic_name__ , ) return self.image_processor
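# Editorial usage sketch, via the upstream CLIPProcessor this class mirrors (the
# checkpoint is the standard public CLIP one):
#
# from PIL import Image
# from transformers import CLIPProcessor
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")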
362
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : List[str] = GPTSwaTokenizer lowerCamelCase_ : str = False lowerCamelCase_ : str = True lowerCamelCase_ : List[Any] = False def lowerCamelCase (self ) -> List[str]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ : Union[str, Any] = GPTSwaTokenizer(__magic_name__ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : Any = '''This is a test''' snake_case_ : str = '''This is a test''' return input_text, output_text def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : int = '''<s>''' snake_case_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(__magic_name__ ) , 2000 ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : str = GPTSwaTokenizer(__magic_name__ ) snake_case_ : List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__magic_name__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [465, 287, 265, 631, 842] ) snake_case_ : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) # fmt: off self.assertListEqual( __magic_name__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on snake_case_ : int = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) snake_case_ : Dict = tokenizer.convert_ids_to_tokens(__magic_name__ ) # fmt: off self.assertListEqual( __magic_name__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] ) # fmt: on def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[Any] = GPTSwaTokenizer(__magic_name__ ) snake_case_ : Tuple = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] snake_case_ : Optional[int] = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns 
the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__magic_name__ , __magic_name__ ): self.assertListEqual(tokenizer.encode_fast(__magic_name__ ) , __magic_name__ ) # Test that decode_fast returns the input text for text, token_ids in zip(__magic_name__ , __magic_name__ ): self.assertEqual(tokenizer.decode_fast(__magic_name__ ) , __magic_name__ ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : str = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off snake_case_ : str = {'''input_ids''': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__magic_name__ , )
279
0
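The `<0xC3>`/`<0xA9>` pieces asserted in the tokenizer test above are SentencePiece byte fallback at work: a character the vocabulary cannot represent is split into one piece per UTF-8 byte. A minimal sketch of that mapping (the helper name is ours, not part of the tokenizer API):

# Hypothetical helper illustrating byte fallback; not a real GPTSw3 tokenizer method.
def byte_fallback_pieces(char: str) -> list[str]:
    # One <0xNN> piece per UTF-8 byte of the character.
    return [f"<0x{byte:02X}>" for byte in char.encode("utf-8")]

print(byte_fallback_pieces("é"))  # ['<0xC3>', '<0xA9>'] - the pieces expected for "falsé"
print(byte_fallback_pieces("9"))  # ['<0x39>'] - matches the piece emitted for "92000"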
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if its options diverge from the arguments
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
322
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path: str, strict: bool, opset: int) -> None:
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
322
1
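Stripped of protobuf plumbing, the compatibility script above reduces to a set difference: the ops a graph uses, minus the ops ONNX supports at the chosen opset, minus internal ops that are safe to ignore. A sketch with illustrative op names rather than a real graph:

# Illustrative op sets; the real script loads these from onnx.json and the SavedModel.
SUPPORTED_ONNX_OPS = {"MatMul", "Relu", "Add", "Softmax"}
SAFE_INTERNAL_OPS = {"ReadVariableOp", "VarHandleOp"}

def find_incompatible(model_ops: set[str]) -> list[str]:
    return sorted(model_ops - SUPPORTED_ONNX_OPS - SAFE_INTERNAL_OPS)

print(find_incompatible({"MatMul", "Relu", "ReadVariableOp", "SomeCustomOp"}))
# ['SomeCustomOp']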
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __snake_case : Any ='\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' __snake_case : Dict ='\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' __snake_case : Union[str, Any] ='\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCamelCase__ ( datasets.Metric): '''simple docstring''' def lowerCAmelCase__ (self ) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ,id='''token''' ) ,id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' ,id='''token''' ) ,id='''sequence''' ) ,id='''references''' ), } ) ,) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 1 ,__lowerCamelCase = 4 ,) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCamelCase ,hypotheses=__lowerCamelCase ,min_len=__lowerCamelCase ,max_len=__lowerCamelCase ) }
351
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit."""
    # Sieve of Eratosthenes restricted to odd numbers, then add 2 back in
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod over primes p dividing n of (1 - 1/p)
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
94
0
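As a quick cross-check of the totient sieve above, a brute-force count straight from the definition phi(n) = |{1 <= k <= n : gcd(k, n) = 1}| agrees on small limits:

from math import gcd

def phi_naive(n: int) -> int:
    # Direct definition: how many k in [1, n] are coprime to n.
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

# solution(10) from the sieve above should equal this brute-force sum:
print(sum(phi_naive(n) for n in range(2, 11)))  # 31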
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel the leading term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If the first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # If any equation contains a zero coefficient, move a zero-free equation to the front
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (fully reduced) row upwards
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
209
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy E = (1/2) * m * v^2; the sign of the velocity does not matter."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
209
1
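The elimination-based solver above can be sanity-checked against NumPy on the same augmented matrix (each row is [a1, ..., an, b]):

import numpy as np

aug = np.array(
    [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ],
    dtype=float,
)
print(np.linalg.solve(aug[:, :-1], aug[:, -1]))  # [-1.  0.  1.  2.  3.]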
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig UpperCamelCase = logging.get_logger(__name__) # General docstring UpperCamelCase = 'PoolFormerConfig' # Base docstring UpperCamelCase = 'sail/poolformer_s12' UpperCamelCase = [1, 512, 7, 7] # Image classification docstring UpperCamelCase = 'sail/poolformer_s12' UpperCamelCase = 'tabby, tabby cat' UpperCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def _A ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : bool = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input lowerCAmelCase__ = 1 - drop_prob lowerCAmelCase__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowerCAmelCase__ = keep_prob + torch.rand(lowerCAmelCase_ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize lowerCAmelCase__ = input.div(lowerCAmelCase_ ) * random_tensor return output class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[float] = None ) -> None: super().__init__() lowerCAmelCase__ = drop_prob def a ( self : str , SCREAMING_SNAKE_CASE__ : torch.Tensor ) -> torch.Tensor: return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training ) def a ( self : Optional[Any] ) -> str: return "p={}".format(self.drop_prob ) class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str=None ) -> Optional[Any]: super().__init__() lowerCAmelCase__ = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size) lowerCAmelCase__ = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride) lowerCAmelCase__ = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding) lowerCAmelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity() def a ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: lowerCAmelCase__ = self.projection(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.norm(SCREAMING_SNAKE_CASE__ ) return embeddings class __lowerCamelCase ( nn.GroupNorm ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Any ) -> Dict: super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: super().__init__() lowerCAmelCase__ = 
nn.AvgPoolad(SCREAMING_SNAKE_CASE__ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE__ ) def a ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: return self.pool(SCREAMING_SNAKE_CASE__ ) - hidden_states class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: super().__init__() lowerCAmelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) lowerCAmelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) lowerCAmelCase__ = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = ACTaFN[config.hidden_act] else: lowerCAmelCase__ = config.hidden_act def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]: lowerCAmelCase__ = self.conva(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.act_fn(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.drop(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.conva(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.drop(SCREAMING_SNAKE_CASE__ ) return hidden_states class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: super().__init__() lowerCAmelCase__ = PoolFormerPooling(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ ) # Useful for training neural nets lowerCAmelCase__ = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity() lowerCAmelCase__ = config.use_layer_scale if config.use_layer_scale: lowerCAmelCase__ = nn.Parameter( config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = nn.Parameter( config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ ) def a ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> int: if self.use_layer_scale: lowerCAmelCase__ = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowerCAmelCase__ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = () lowerCAmelCase__ = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowerCAmelCase__ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = (output,) + outputs return outputs else: lowerCAmelCase__ = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) ) # First residual connection lowerCAmelCase__ = pooling_output + hidden_states lowerCAmelCase__ = () # Second residual connection inside the PoolFormerOutput block lowerCAmelCase__ = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) ) lowerCAmelCase__ = hidden_states + layer_output lowerCAmelCase__ = (output,) + outputs 
return outputs class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Any: super().__init__() lowerCAmelCase__ = config # stochastic depth decay rule lowerCAmelCase__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowerCAmelCase__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowerCAmelCase__ = nn.ModuleList(SCREAMING_SNAKE_CASE__ ) # Transformer blocks lowerCAmelCase__ = [] lowerCAmelCase__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowerCAmelCase__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase__ = nn.ModuleList(SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : List[str]=True ) -> Dict: lowerCAmelCase__ = () if output_hidden_states else None lowerCAmelCase__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowerCAmelCase__ , lowerCAmelCase__ = layers # Get patch embeddings from hidden_states lowerCAmelCase__ = embedding_layer(SCREAMING_SNAKE_CASE__ ) # Send the embeddings through the blocks for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = blk(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = layer_outputs[0] if output_hidden_states: lowerCAmelCase__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" snake_case__ = PoolFormerConfig snake_case__ = "poolformer" snake_case__ = "pixel_values" snake_case__ = True def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def a ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> Tuple: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = value UpperCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' UpperCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: super().__init__(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = config lowerCAmelCase__ = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ ) # Initialize weights and apply final processing self.post_init() def a ( self : Optional[int] ) -> Optional[Any]: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) lowerCAmelCase__ = self.encoder( SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , ) class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]: super().__init__() lowerCAmelCase__ = nn.Linear(config.hidden_size , config.hidden_size ) def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int: lowerCAmelCase__ = self.dense(SCREAMING_SNAKE_CASE__ ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]: super().__init__(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = config.num_labels lowerCAmelCase__ = PoolFormerModel(SCREAMING_SNAKE_CASE__ ) # Final norm lowerCAmelCase__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowerCAmelCase__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : 
Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.poolformer( SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = outputs[0] lowerCAmelCase__ = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) ) lowerCAmelCase__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCAmelCase__ = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCAmelCase__ = "single_label_classification" else: lowerCAmelCase__ = "multi_label_classification" if self.config.problem_type == "regression": lowerCAmelCase__ = MSELoss() if self.num_labels == 1: lowerCAmelCase__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCAmelCase__ = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.config.problem_type == "single_label_classification": lowerCAmelCase__ = CrossEntropyLoss() lowerCAmelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCAmelCase__ = BCEWithLogitsLoss() lowerCAmelCase__ = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not return_dict: lowerCAmelCase__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
221
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
221
1
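The distinctive idea in the PoolFormer code above is its token mixer: plain average pooling minus the identity, used where a transformer would put self-attention. A minimal sketch with illustrative shapes:

import torch
import torch.nn as nn

pool_size = 3
# Stride 1 plus symmetric padding preserves the spatial size, so pool(x) - x
# is a drop-in, parameter-free token mixer (same construction as PoolFormerPooling).
pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

x = torch.randn(1, 64, 7, 7)  # (batch, channels, height, width) - stand-in tensor
print((pool(x) - x).shape)    # torch.Size([1, 64, 7, 7])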
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __lowercase ( unittest.TestCase ): """simple docstring""" def __A ( self , A ) -> Optional[Any]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_A ) def __A ( self ) -> str: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __A ( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase = """sgugger/tiny-distilbert-classification""" lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , only_pretrain_model=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __A ( self ) -> int: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , torchscript=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def __A ( self ) -> str: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , fpaa=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __A ( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = AutoConfig.from_pretrained(_A ) # set architectures equal to `None` lowerCamelCase = None lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A , configs=[config] ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __A ( self ) -> str: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) lowerCamelCase = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" ) def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_A , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __A ( self ) -> Dict: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = AutoConfig.from_pretrained(_A ) lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A , configs=[config] ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __A ( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase = """sshleifer/tinier_bart""" lowerCamelCase = AutoConfig.from_pretrained(_A ) lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A , configs=[config] ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __A ( self ) -> Any: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" lowerCamelCase = AutoConfig.from_pretrained(_A ) lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A , configs=[config] ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = """sshleifer/tinier_bart""" lowerCamelCase = AutoConfig.from_pretrained(_A ) lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A , configs=[config] ) lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __A ( self ) -> List[str]: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , save_to_csv=_A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_A , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_A , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_A , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_A , """train_time.csv""" ) , env_info_csv_file=os.path.join(_A , """env.csv""" ) , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) benchmark.run() self.assertTrue(Path(os.path.join(_A , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_A , 
"""train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_A , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_A , """train_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_A , """env.csv""" ) ).exists() ) def __A ( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(A ): self.assertTrue(hasattr(_A , """sequential""" ) ) self.assertTrue(hasattr(_A , """cumulative""" ) ) self.assertTrue(hasattr(_A , """current""" ) ) self.assertTrue(hasattr(_A , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_A , """log.txt""" ) , log_print=_A , trace_memory_line_by_line=_A , multi_process=_A , ) lowerCamelCase = PyTorchBenchmark(_A ) lowerCamelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_A , """log.txt""" ) ).exists() )
252
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) lowerCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCamelCase__ : Union[str, Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "A red cartoon frog, 4k" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/frog.png" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save("red_frog.png") ``` ''' def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any]=8 ) -> Union[str, Any]: _UpperCAmelCase : Dict = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _UpperCAmelCase : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Any=512, _lowerCAmelCase : Optional[Any]=512 ) -> Optional[Any]: _UpperCAmelCase : Optional[int] = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1 ) _UpperCAmelCase : List[Any] = np.array(pil_image.convert("""RGB""" ) ) _UpperCAmelCase : str = arr.astype(np.floataa ) / 127.5 - 1 _UpperCAmelCase : Dict = np.transpose(_lowerCAmelCase, [2, 0, 1] ) _UpperCAmelCase : Union[str, Any] = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ) return image class _UpperCAmelCase ( __a): def __init__( self , _A , _A , _A , ) -> int: '''simple docstring''' super().__init__() self.register_modules( unet=_A , scheduler=_A , movq=_A , ) _UpperCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __snake_case ( self , _A , _A , _A ) -> List[Any]: '''simple docstring''' _UpperCAmelCase : int = min(int(num_inference_steps * strength ) , _A ) _UpperCAmelCase : Dict = max(num_inference_steps - init_timestep , 0 ) _UpperCAmelCase : Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __snake_case ( self , _A , _A , _A , _A , _A , _A , _A=None ) -> List[Any]: '''simple docstring''' if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}''' ) _UpperCAmelCase : Any = image.to(device=_A , dtype=_A ) _UpperCAmelCase : Optional[int] = batch_size * num_images_per_prompt if image.shape[1] == 4: _UpperCAmelCase : Dict = image else: if isinstance(_A 
, _A ) and len(_A ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(_A )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(_A , _A ): _UpperCAmelCase : Any = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A ) ] _UpperCAmelCase : List[Any] = torch.cat(_A , dim=0 ) else: _UpperCAmelCase : str = self.movq.encode(_A ).latent_dist.sample(_A ) _UpperCAmelCase : Any = self.movq.config.scaling_factor * init_latents _UpperCAmelCase : List[Any] = torch.cat([init_latents] , dim=0 ) _UpperCAmelCase : Union[str, Any] = init_latents.shape _UpperCAmelCase : List[Any] = randn_tensor(_A , generator=_A , device=_A , dtype=_A ) # get latents _UpperCAmelCase : Optional[int] = self.scheduler.add_noise(_A , _A , _A ) _UpperCAmelCase : Optional[int] = init_latents return latents def __snake_case ( self , _A=0 ) -> Optional[Any]: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _UpperCAmelCase : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) _UpperCAmelCase : Tuple = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_A , _A ) def __snake_case ( self , _A=0 ) -> int: '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) _UpperCAmelCase : int = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=_A ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _UpperCAmelCase : Any = None for cpu_offloaded_model in [self.unet, self.movq]: _UpperCAmelCase , _UpperCAmelCase : Tuple = cpu_offload_with_hook(_A , _A , prev_module_hook=_A ) # We'll offload the last model manually. 
_UpperCAmelCase : Optional[int] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __snake_case ( self ) -> List[str]: '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(_A , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_A ) def __call__( self , _A , _A , _A , _A = 5_12 , _A = 5_12 , _A = 1_00 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ) -> Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = self._execution_device _UpperCAmelCase : List[str] = guidance_scale > 1.0 if isinstance(_A , _A ): _UpperCAmelCase : Dict = torch.cat(_A , dim=0 ) _UpperCAmelCase : Any = image_embeds.shape[0] if isinstance(_A , _A ): _UpperCAmelCase : Any = torch.cat(_A , dim=0 ) if do_classifier_free_guidance: _UpperCAmelCase : str = image_embeds.repeat_interleave(_A , dim=0 ) _UpperCAmelCase : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 ) _UpperCAmelCase : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A ) if not isinstance(_A , _A ): _UpperCAmelCase : str = [image] if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f'''Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' ) _UpperCAmelCase : Union[str, Any] = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 ) _UpperCAmelCase : List[Any] = image.to(dtype=image_embeds.dtype , device=_A ) _UpperCAmelCase : int = self.movq.encode(_A )["""latents"""] _UpperCAmelCase : Dict = latents.repeat_interleave(_A , dim=0 ) self.scheduler.set_timesteps(_A , device=_A ) _UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(_A , _A , _A ) _UpperCAmelCase : Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt ) _UpperCAmelCase , _UpperCAmelCase : str = downscale_height_and_width(_A , _A , self.movq_scale_factor ) _UpperCAmelCase : List[Any] = self.prepare_latents( _A , _A , _A , _A , image_embeds.dtype , _A , _A ) for i, t in enumerate(self.progress_bar(_A ) ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase : Union[str, Any] = {"""image_embeds""": image_embeds} _UpperCAmelCase : str = self.unet( sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0] if do_classifier_free_guidance: _UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 ) _UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.chunk(2 ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = variance_pred.chunk(2 ) _UpperCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _UpperCAmelCase : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _UpperCAmelCase , _UpperCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase : List[Any] = 
self.scheduler.step( _A , _A , _A , generator=_A , )[0] # post-processing _UpperCAmelCase : Optional[int] = self.movq.decode(_A , force_not_quantize=_A )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: _UpperCAmelCase : Any = image * 0.5 + 0.5 _UpperCAmelCase : Dict = image.clamp(0 , 1 ) _UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _UpperCAmelCase : List[str] = self.numpy_to_pil(_A ) if not return_dict: return (image,) return ImagePipelineOutput(images=_A )
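The denoising loop above hinges on one line of classifier-free guidance: the unconditional and conditioned noise predictions are split apart and recombined with a scale factor. A minimal, self-contained sketch of just that update (tensor shapes are illustrative assumptions):

import torch

guidance_scale = 4.0
noise_pred_uncond = torch.randn(1, 4, 64, 64)  # prediction without conditioning
noise_pred_text = torch.randn(1, 4, 64, 64)    # prediction with the image embeds

# Push the estimate away from the unconditional prediction, toward the conditioned one.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])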
246
0
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __snake_case = logging.get_logger(__name__) __snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __snake_case = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __snake_case = { """allenai/led-base-16384""": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __lowerCAmelCase ( ) -> str: """simple docstring""" snake_case : List[str] = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) snake_case : List[str] = bs[:] snake_case : List[Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 snake_case : List[Any] = [chr(lowercase ) for n in cs] return dict(zip(lowercase , lowercase ) ) def __lowerCAmelCase ( lowercase : Dict ) -> Any: """simple docstring""" snake_case : Tuple = set() snake_case : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case : Union[str, Any] = char return pairs class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Any = VOCAB_FILES_NAMES __UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Optional[Any] = ['''input_ids''', '''attention_mask'''] def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Dict: '''simple docstring''' snake_case : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token snake_case : Optional[int] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token snake_case : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token snake_case : List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token snake_case : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token super().__init__( errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , ) with open(UpperCamelCase__ , encoding="utf-8" ) as vocab_handle: snake_case : str = json.load(UpperCamelCase__ ) snake_case : Dict = {v: k for k, v in self.encoder.items()} snake_case : Dict = errors # how to handle errors in decoding snake_case : Union[str, Any] = bytes_to_unicode() snake_case : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase__ , encoding="utf-8" ) as merges_handle: snake_case : Dict = merges_handle.read().split("\n" )[1:-1] snake_case : str = [tuple(merge.split() ) for merge in bpe_merges] snake_case : Union[str, Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) snake_case : Optional[Any] = {} snake_case : str = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case : int = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' if token in self.cache: return self.cache[token] snake_case : str = tuple(UpperCamelCase__ ) snake_case : Optional[Any] = get_pairs(UpperCamelCase__ ) if not pairs: return token while True: snake_case : List[Any] = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case ,snake_case : Any = bigram snake_case : List[Any] = [] snake_case : Any = 0 while i < len(UpperCamelCase__ ): try: snake_case : Dict = word.index(UpperCamelCase__ , UpperCamelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case : Tuple = j if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case : Optional[Any] = tuple(UpperCamelCase__ ) snake_case : Any = new_word if len(UpperCamelCase__ ) == 1: break else: snake_case : Union[str, Any] = get_pairs(UpperCamelCase__ ) snake_case : Dict = " ".join(UpperCamelCase__ ) snake_case : int = word return word def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' snake_case : List[Any] = [] for token in re.findall(self.pat , UpperCamelCase__ ): snake_case : str = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(" " ) ) return bpe_tokens def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) ) def lowerCamelCase ( self 
, UpperCamelCase__ ) -> List[Any]: '''simple docstring''' return self.decoder.get(UpperCamelCase__ ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Any: '''simple docstring''' snake_case : Optional[int] = "".join(UpperCamelCase__ ) snake_case : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCamelCase__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return snake_case : List[Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case : str = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + "\n" ) snake_case : str = 0 with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' " Please check that the tokenizer is not corrupted!" ) snake_case : int = token_index writer.write(" ".join(UpperCamelCase__ ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case : Optional[Any] = [self.cls_token_id] snake_case : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1] def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: '''simple docstring''' snake_case : Tuple = [self.sep_token_id] snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> Any: '''simple docstring''' snake_case : Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()): snake_case : Dict = " " + text return (text, kwargs) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = PaddingStrategy.DO_NOT_PAD , UpperCamelCase__ = None , UpperCamelCase__ = None , ) -> dict: '''simple docstring''' snake_case : Union[str, Any] = super()._pad( encoded_inputs=UpperCamelCase__ , max_length=UpperCamelCase__ , padding_strategy=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ) # Load from model defaults if 
return_attention_mask is None: snake_case : Optional[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case : int = len(encoded_inputs["global_attention_mask"] ) != len(UpperCamelCase__ ) if needs_to_be_padded: snake_case : List[str] = len(UpperCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case : Any = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": snake_case : Dict = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
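The _pad override above pads global_attention_mask with -1 (meaning "local attention", not "do not attend"). A hedged usage sketch via the released checkpoint; downloading "allenai/led-base-16384" is an assumption:

from transformers import LEDTokenizer

tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tok(["short text", "a somewhat longer input text"])  # unpadded
# Give the first token of each sequence global attention, then re-pad:
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
batch = tok.pad(enc, padding="longest")
print(batch["global_attention_mask"])  # the shorter row is padded out with -1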
112
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _lowerCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) snake_case : int = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def lowerCamelCase ( self ) -> Any: '''simple docstring''' snake_case : Any = self.dummy_uncond_unet snake_case : Tuple = KarrasVeScheduler() snake_case : int = KarrasVePipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : Optional[Any] = torch.manual_seed(0 ) snake_case : List[Any] = pipe(num_inference_steps=2 , generator=UpperCamelCase__ , output_type="numpy" ).images snake_case : Dict = torch.manual_seed(0 ) snake_case : Dict = pipe(num_inference_steps=2 , generator=UpperCamelCase__ , output_type="numpy" , return_dict=UpperCamelCase__ )[0] snake_case : Tuple = image[0, -3:, -3:, -1] snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' snake_case : Optional[Any] = "google/ncsnpp-celebahq-256" snake_case : List[str] = UNetaDModel.from_pretrained(UpperCamelCase__ ) snake_case : Optional[Any] = KarrasVeScheduler() snake_case : Optional[int] = KarrasVePipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : Dict = torch.manual_seed(0 ) snake_case : Union[str, Any] = pipe(num_inference_steps=20 , generator=UpperCamelCase__ , output_type="numpy" ).images snake_case : int = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) snake_case : Any = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
112
1
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for _ in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Emit the permutation once every position has been filled.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()  # backtrack
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
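A self-contained cross-check: the backtracking search above should print exactly the n! orderings that itertools.permutations enumerates:

from itertools import permutations

sequence = [3, 1, 2, 4]
expected = [list(p) for p in permutations(sequence)]
print(len(expected))  # 24 == 4!
print(expected[0])    # [3, 1, 2, 4]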
27
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase_ = float('''nan''') class __lowerCAmelCase : def __init__(self , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : List[Any] = sys.stdout snake_case_ : int = open(__magic_name__ , '''a''' ) def __getattr__(self , __magic_name__ ) -> Dict: '''simple docstring''' return getattr(self.stdout , __magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' self.stdout.write(__magic_name__ ) # strip tqdm codes self.file.write(re.sub(R'''^.*\r''' , '''''' , __magic_name__ , 0 , re.M ) ) def lowerCamelCase_ ( _UpperCamelCase=80 , _UpperCamelCase=False ) -> str: """simple docstring""" snake_case_ : str = [] # deal with critical env vars snake_case_ : int = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: snake_case_ : Optional[int] = os.environ.get(_UpperCamelCase , _UpperCamelCase ) if val is not None: cmd.append(f'''{key}={val}''' ) # python executable (not always needed if the script is executable) snake_case_ : Optional[int] = sys.executable if full_python_path else sys.executable.split('''/''' )[-1] cmd.append(_UpperCamelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes snake_case_ : Dict = [] snake_case_ : Dict = '''''' while len(_UpperCamelCase ) > 0: current_line += f'''{cmd.pop(0 )} ''' if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCamelCase ) snake_case_ : List[Any] = '''''' return "\\\n".join(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]: """simple docstring""" snake_case_ : str = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd ) # remove --output_dir if any and set our own snake_case_ : Optional[Any] = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd ) args.base_cmd += f''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir snake_case_ : int = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , ) snake_case_ : Tuple = subprocess.run(_UpperCamelCase , 
capture_output=_UpperCamelCase , text=_UpperCamelCase ) if verbose: print('''STDOUT''' , result.stdout ) print('''STDERR''' , result.stderr ) # save the streams snake_case_ : Any = variation.replace(''' ''' , '''-''' ) with open(Path(_UpperCamelCase ) / f'''log.{prefix}.stdout.txt''' , '''w''' ) as f: f.write(result.stdout ) with open(Path(_UpperCamelCase ) / f'''log.{prefix}.stderr.txt''' , '''w''' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('''failed''' ) return {target_metric_key: nan} with io.open(f'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f: snake_case_ : str = json.load(_UpperCamelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Tuple: """simple docstring""" snake_case_ : Tuple = [] snake_case_ : Any = [] snake_case_ : int = f'''{id}: {variation:<{longest_variation_len}}''' snake_case_ : Optional[Any] = f'''{preamble}: ''' snake_case_ : Optional[int] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCamelCase ) , desc=_UpperCamelCase , leave=_UpperCamelCase ): snake_case_ : int = process_run_single( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ : List[str] = single_run_metrics[target_metric_key] if not math.isnan(_UpperCamelCase ): metrics.append(_UpperCamelCase ) results.append(_UpperCamelCase ) outcome += "✓" else: outcome += "✘" snake_case_ : Any = f'''\33[2K\r{outcome}''' if len(_UpperCamelCase ) > 0: snake_case_ : List[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} snake_case_ : Any = round(mean_metrics[target_metric_key] , 2 ) snake_case_ : List[str] = f'''{outcome} {mean_target}''' if len(_UpperCamelCase ) > 1: results_str += f''' {tuple(round(_UpperCamelCase , 2 ) for x in results )}''' print(_UpperCamelCase ) snake_case_ : Optional[int] = variation return mean_metrics else: print(_UpperCamelCase ) return {variation_key: variation, target_metric_key: nan} def lowerCamelCase_ ( ) -> Optional[int]: """simple docstring""" snake_case_ : Any = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return f''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : str = pd.DataFrame(_UpperCamelCase ) snake_case_ : Optional[int] = '''variation''' snake_case_ : Union[str, Any] = '''diff_%''' snake_case_ : Optional[int] = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan snake_case_ : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCamelCase ): # as a fallback, use the minimal value as the sentinel snake_case_ : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCamelCase ): snake_case_ : Dict = df.apply( lambda 
_UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='''columns''' , ) # re-order columns snake_case_ : Dict = [variation_key, target_metric_key, diff_key, *report_metric_keys] snake_case_ : int = df.reindex(_UpperCamelCase , axis='''columns''' ) # reorder cols # capitalize snake_case_ : Optional[int] = df.rename(str.capitalize , axis='''columns''' ) # make the cols as narrow as possible snake_case_ : Any = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' ) snake_case_ : int = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''\n''' ) , axis='''columns''' ) snake_case_ : Tuple = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )] print('''\n\n'''.join(_UpperCamelCase ) ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" snake_case_ : Any = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Base cmd''' , ) parser.add_argument( '''--variations''' , default=_UpperCamelCase , type=_UpperCamelCase , nargs='''+''' , required=_UpperCamelCase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , ) parser.add_argument( '''--base-variation''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , ) parser.add_argument( '''--target-metric-key''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , ) parser.add_argument( '''--report-metric-keys''' , default='''''' , type=_UpperCamelCase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' , ) parser.add_argument( '''--repeat-times''' , default=1 , type=_UpperCamelCase , help='''How many times to re-run each variation - an average will be reported''' , ) parser.add_argument( '''--output_dir''' , default='''output_benchmark''' , type=_UpperCamelCase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , ) parser.add_argument( '''--verbose''' , default=_UpperCamelCase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , ) snake_case_ : Tuple = parser.parse_args() snake_case_ : Optional[Any] = args.output_dir Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) snake_case_ : Optional[int] = get_base_command(_UpperCamelCase , _UpperCamelCase ) # split each dimension into its --foo variations snake_case_ : Optional[int] = [list(map(str.strip , re.split(R'''\|''' , _UpperCamelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty snake_case_ : List[str] = list(map(str.strip , map(''' '''.join , itertools.product(*_UpperCamelCase ) ) ) ) snake_case_ : Optional[int] = max(len(_UpperCamelCase ) for x in variations ) # split wanted keys snake_case_ : int = args.report_metric_keys.split() # capture prints into a log file for convenience snake_case_ : str = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(f'''and this script\'s output is also piped into {report_fn}''' ) snake_case_ : Tuple = Tee(_UpperCamelCase ) print(f'''\n*** Running {len(_UpperCamelCase )} benchmarks:''' ) print(f'''Base command: {" ".join(_UpperCamelCase )}''' ) snake_case_ : List[Any] = '''variation''' snake_case_ : Tuple = [] for id, variation in enumerate(tqdm(_UpperCamelCase , desc='''Total completion: ''' , leave=_UpperCamelCase ) ): snake_case_ : Optional[Any] = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.repeat_times , _UpperCamelCase , args.verbose , ) ) process_results(_UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.base_variation , _UpperCamelCase ) if __name__ == "__main__": main()
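The heart of the tool is the cartesian-product expansion of --variations; this small, self-contained sketch mirrors the itertools.product call in main():

import itertools

dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
variations = [" ".join(v).strip() for v in itertools.product(*dims)]
print(len(variations))  # 6
print(variations)  # ['--tf32 0', '--tf32 0 --fp16', ..., '--tf32 1 --bf16']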
279
0
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _snake_case ( A , A=0.999 , A="cosine" , ) -> int: if alpha_transform_type == "cosine": def alpha_bar_fn(A ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowerCAmelCase__ = [] for i in range(A ): lowerCAmelCase__ = i / num_diffusion_timesteps lowerCAmelCase__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A ) / alpha_bar_fn(A ) , A ) ) return torch.tensor(A , dtype=torch.floataa ) class a__ ( a__ , a__ ): '''simple docstring''' lowercase__ : Tuple = [e.name for e in KarrasDiffusionSchedulers] lowercase__ : List[Any] = 2 @register_to_config def __init__( self , lowerCamelCase_ = 10_00 , lowerCamelCase_ = 0.00_085 , lowerCamelCase_ = 0.012 , lowerCamelCase_ = "linear" , lowerCamelCase_ = None , lowerCamelCase_ = "epsilon" , lowerCamelCase_ = "linspace" , lowerCamelCase_ = 0 , ) -> Tuple: if trained_betas is not None: lowerCAmelCase__ = torch.tensor(lowerCamelCase_ , dtype=torch.floataa ) elif beta_schedule == "linear": lowerCAmelCase__ = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowerCAmelCase__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowerCAmelCase__ = betas_for_alpha_bar(lowerCamelCase_ ) else: raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" ) lowerCAmelCase__ = 1.0 - self.betas lowerCAmelCase__ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None ) -> Any: if schedule_timesteps is None: lowerCAmelCase__ = self.timesteps lowerCAmelCase__ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: lowerCAmelCase__ = 1 if len(lowerCamelCase_ ) > 1 else 0 else: lowerCAmelCase__ = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep lowerCAmelCase__ = self._index_counter[timestep_int] return indices[pos].item() @property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , ) -> torch.FloatTensor: lowerCAmelCase__ = self.index_for_timestep(lowerCamelCase_ ) if self.state_in_first_order: lowerCAmelCase__ = self.sigmas[step_index] else: lowerCAmelCase__ = self.sigmas_interpol[step_index] lowerCAmelCase__ = sample / ((sigma**2 + 1) ** 0.5) return sample def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Optional[Any]: lowerCAmelCase__ = num_inference_steps lowerCAmelCase__ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowerCAmelCase__ = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase_ , dtype=lowerCamelCase_ )[::-1].copy() elif self.config.timestep_spacing == "leading": lowerCAmelCase__ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowerCAmelCase__ = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowerCAmelCase__ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowerCAmelCase__ = (np.arange(lowerCamelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase_ ) timesteps -= 1 else: raise ValueError( F"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) lowerCAmelCase__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) lowerCAmelCase__ = torch.from_numpy(np.log(lowerCamelCase_ ) ).to(lowerCamelCase_ ) lowerCAmelCase__ = np.interp(lowerCamelCase_ , np.arange(0 , len(lowerCamelCase_ ) ) , lowerCamelCase_ ) lowerCAmelCase__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) lowerCAmelCase__ = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ ) # interpolate sigmas lowerCAmelCase__ = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() lowerCAmelCase__ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) lowerCAmelCase__ = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(lowerCamelCase_ ).startswith('''mps''' ): # mps does not support float64 lowerCAmelCase__ = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=torch.floataa ) else: lowerCAmelCase__ = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ ) # interpolate timesteps lowerCAmelCase__ = self.sigma_to_t(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=timesteps.dtype ) lowerCAmelCase__ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() lowerCAmelCase__ = torch.cat([timesteps[:1], interleaved_timesteps] ) lowerCAmelCase__ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowerCAmelCase__ = defaultdict(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]: # get log sigma lowerCAmelCase__ = sigma.log() # get distribution lowerCAmelCase__ = log_sigma - self.log_sigmas[:, None] # get sigmas range lowerCAmelCase__ = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) lowerCAmelCase__ = low_idx + 1 lowerCAmelCase__ = self.log_sigmas[low_idx] lowerCAmelCase__ = self.log_sigmas[high_idx] # interpolate sigmas lowerCAmelCase__ = (low - log_sigma) / (low - high) lowerCAmelCase__ = w.clamp(0 , 1 ) # transform interpolation to time range lowerCAmelCase__ = (1 - w) * low_idx + w * high_idx lowerCAmelCase__ = t.view(sigma.shape ) return t @property def __SCREAMING_SNAKE_CASE ( self ) -> Any: return self.sample is None def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ) -> Union[SchedulerOutput, Tuple]: lowerCAmelCase__ = self.index_for_timestep(lowerCamelCase_ ) # advance index counter by 1 lowerCAmelCase__ = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowerCAmelCase__ = self.sigmas[step_index] lowerCAmelCase__ = self.sigmas_interpol[step_index + 1] lowerCAmelCase__ = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method lowerCAmelCase__ = self.sigmas[step_index - 1] lowerCAmelCase__ = self.sigmas_interpol[step_index] lowerCAmelCase__ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowerCAmelCase__ = 0 lowerCAmelCase__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowerCAmelCase__ = sigma_hat if self.state_in_first_order else sigma_interpol lowerCAmelCase__ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowerCAmelCase__ = sigma_hat if self.state_in_first_order else sigma_interpol lowerCAmelCase__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowerCAmelCase__ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowerCAmelCase__ = sigma_interpol - sigma_hat # store for 2nd order step lowerCAmelCase__ = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order lowerCAmelCase__ = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep lowerCAmelCase__ = sigma_next - sigma_hat lowerCAmelCase__ = self.sample lowerCAmelCase__ = None lowerCAmelCase__ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples lowerCAmelCase__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_ ): # mps does not support float64 lowerCAmelCase__ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) lowerCAmelCase__ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: lowerCAmelCase__ = self.timesteps.to(original_samples.device ) lowerCAmelCase__ = timesteps.to(original_samples.device ) lowerCAmelCase__ = [self.index_for_timestep(lowerCamelCase_ , lowerCamelCase_ ) for t in timesteps] lowerCAmelCase__ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): lowerCAmelCase__ = sigma.unsqueeze(-1 ) lowerCAmelCase__ = original_samples + noise * sigma return noisy_samples def __len__( self ) -> str: return self.config.num_train_timesteps
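The scheduler above follows the KDPM2 (two-step, interpolated-sigma) design; a hedged sketch of driving it through the public API, assuming the KDPM2DiscreteScheduler name from the matching diffusers release:

import torch
from diffusers import KDPM2DiscreteScheduler

sched = KDPM2DiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
sched.set_timesteps(num_inference_steps=10)
print(len(sched.timesteps))  # first- and second-order steps interleaved

sample = torch.randn(1, 4, 8, 8) * sched.init_noise_sigma
sample = sched.scale_model_input(sample, sched.timesteps[0])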
228
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (masked_input.replace(" {0}".format(masked_token), predicted_token), values[index].item(), predicted_token)
            )
        else:
            topk_filled_outputs.append(
                (masked_input.replace(masked_token, predicted_token), values[index].item(), predicted_token)
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
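The same task through the high-level transformers pipeline, as a sanity check against the hand-rolled fill_mask above (model download assumed):

from transformers import pipeline

fill = pipeline("fill-mask", model="camembert-base")
for candidate in fill("Le camembert est <mask> :)", top_k=3):
    print(candidate["token_str"], round(candidate["score"], 3))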
228
1
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets __a = '''\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } ''' __a = '''\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. ''' __a = ''' Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for \'cvit-mkb-clsr\' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "precision": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'precision@10\': 1.0} ''' def a ( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ): '''simple docstring''' return float((preds == labels).mean() ) def a ( snake_case__: List[str] , snake_case__: Dict ): '''simple docstring''' lowercase_ = simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase_ = float(fa_score(y_true=UpperCAmelCase_ , y_pred=UpperCAmelCase_ ) ) return { "accuracy": acc, "f1": fa, } def a ( snake_case__: Optional[Any] , snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = np.array(UpperCAmelCase_ ) lowercase_ = np.array(UpperCAmelCase_ ) lowercase_ = en_sentvecs.shape[0] # mean centering lowercase_ = en_sentvecs - np.mean(UpperCAmelCase_ , axis=0 ) lowercase_ = in_sentvecs - np.mean(UpperCAmelCase_ , axis=0 ) lowercase_ = cdist(UpperCAmelCase_ , UpperCAmelCase_ , '''cosine''' ) lowercase_ = np.array(range(UpperCAmelCase_ ) ) lowercase_ = sim.argsort(axis=1 )[:, :10] lowercase_ = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase__( datasets.Metric ): """simple docstring""" def _lowercase ( self : str ) -> Any: if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration 
name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(_lowerCamelCase , _lowerCamelCase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(_lowerCamelCase , _lowerCamelCase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(_lowerCamelCase , _lowerCamelCase )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' )
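A self-contained toy run of the cvit-mkb-clsr retrieval metric above: mean-center both sides, rank by cosine distance, and count a hit when the true index lands in the top 10:

import numpy as np
from scipy.spatial.distance import cdist

en = np.array([[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]])
ind = np.array([[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]])
en = en - en.mean(axis=0)
ind = ind - ind.mean(axis=0)
sim = cdist(en, ind, "cosine")
preds = sim.argsort(axis=1)[:, :10]
actual = np.arange(en.shape[0])
print(float(np.any(preds == actual[:, None], axis=1).mean()))  # 1.0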
30
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    # Liouville function: 1 when `number` has an even count of prime factors
    # (with multiplicity), -1 when the count is odd.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
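A quick self-contained value check of lambda(n) = (-1)**Omega(n), where Omega counts prime factors with multiplicity; the tiny factor counter below stands in for maths.prime_factors:

def omega(n: int) -> int:
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    return count + (1 if n > 1 else 0)

for n in (1, 2, 4, 12):
    print(n, -1 if omega(n) % 2 else 1)  # 1 -> 1, 2 -> -1, 4 -> 1, 12 -> -1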
94
0
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = XLNetTokenizer lowercase__ : List[str] = XLNetTokenizerFast lowercase__ : Optional[Any] = True lowercase__ : int = True def __SCREAMING_SNAKE_CASE ( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = '''<s>''' lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<eod>''' ) self.assertEqual(len(lowerCamelCase_ ) , 10_06 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] ) lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE 
+ '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' ) lowerCAmelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: # fmt: off lowerCAmelCase__ = {'''input_ids''': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
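A hedged sketch of the behaviour the slow test asserts: XLNet appends its special tokens at the end of the sequence (fetching "xlnet-base-cased" is an assumption):

from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
ids = tok.encode("sequence builders", add_special_tokens=True)
print(ids[-2:])  # [4, 3] == [<sep>, <cls>], appended at the end rather than the start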
362
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively, returning the approximation after the given number of sweeps."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError unless every diagonal entry dominates the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
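# A minimal usage sketch for jacobi_iteration_method above (the 3x3 system, starting
# vector and iteration count are illustrative; any strictly diagonally dominant system works):
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])

approximation = jacobi_iteration_method(coefficient, constant, init_val=[0.0, 0.0, 0.0], iterations=50)
print(approximation)  # converges towards the exact solution of the system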
228
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["ConditionalDetrFeatureExtractor"] __lowerCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
221
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) __lowerCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCamelCase__: lowerCAmelCase__ : str = field( default=__A , metadata={'help': 'Model type selected in the list: ' + ', '.join(__A )} ) lowerCAmelCase__ : str = field( default=__A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowerCAmelCase__ : int = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCAmelCase__ : int = field( default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) lowerCAmelCase__ : int = field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) lowerCAmelCase__ : int = field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' 
) } , ) lowerCAmelCase__ : bool = field( default=__A , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowerCAmelCase__ : bool = field( default=__A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowerCAmelCase__ : float = field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowerCAmelCase__ : int = field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowerCAmelCase__ : int = field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) lowerCAmelCase__ : int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class UpperCamelCase__( __A ): lowerCAmelCase__ : Dict = 'train' lowerCAmelCase__ : int = 'dev' class UpperCamelCase__( __A ): lowerCAmelCase__ : SquadDataTrainingArguments lowerCAmelCase__ : List[SquadFeatures] lowerCAmelCase__ : Split lowerCAmelCase__ : bool def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = Split.train ,__UpperCAmelCase = False ,__UpperCAmelCase = None ,__UpperCAmelCase = "pt" ,) -> Optional[Any]: A__ = args A__ = is_language_sensitive A__ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): try: A__ = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) A__ = mode # Load data features from cache or dataset file A__ = 'v2' if args.version_2_with_negative else 'v1' A__ = os.path.join( cache_dir if cache_dir is not None else args.data_dir ,f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' ,) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. A__ = cached_features_file + '.lock' with FileLock(__UpperCAmelCase ): if os.path.exists(__UpperCAmelCase ) and not args.overwrite_cache: A__ = time.time() A__ = torch.load(__UpperCAmelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. A__ = self.old_features['features'] A__ = self.old_features.get('dataset' ,__UpperCAmelCase ) A__ = self.old_features.get('examples' ,__UpperCAmelCase ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''' ,time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: A__ = self.processor.get_dev_examples(args.data_dir ) else: A__ = self.processor.get_train_examples(args.data_dir ) A__ , A__ = squad_convert_examples_to_features( examples=self.examples ,tokenizer=__UpperCAmelCase ,max_seq_length=args.max_seq_length ,doc_stride=args.doc_stride ,max_query_length=args.max_query_length ,is_training=mode == Split.train ,threads=args.threads ,return_dataset=__UpperCAmelCase ,) A__ = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} ,__UpperCAmelCase ,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Union[str, Any]: return len(self.features ) def __getitem__( self ,__UpperCAmelCase ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset A__ = self.features[i] A__ = torch.tensor(feature.input_ids ,dtype=torch.long ) A__ = torch.tensor(feature.attention_mask ,dtype=torch.long ) A__ = torch.tensor(feature.token_type_ids ,dtype=torch.long ) A__ = torch.tensor(feature.cls_index ,dtype=torch.long ) A__ = torch.tensor(feature.p_mask ,dtype=torch.float ) A__ = torch.tensor(feature.is_impossible ,dtype=torch.float ) A__ = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape ,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: A__ = torch.tensor(feature.start_position ,dtype=torch.long ) A__ = torch.tensor(feature.end_position ,dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
221
1
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def _lowerCAmelCase ( lowercase ) -> Tuple: __lowerCAmelCase = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) def _lowerCAmelCase ( lowercase ) -> int: __lowerCAmelCase = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: __lowerCAmelCase = s_dict.pop(_lowerCAmelCase ) elif "subsample" in key: __lowerCAmelCase = s_dict.pop(_lowerCAmelCase ) def _lowerCAmelCase ( lowercase ) -> Optional[int]: __lowerCAmelCase = emb.weight.shape __lowerCAmelCase = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) __lowerCAmelCase = emb.weight.data return lin_layer def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple: __lowerCAmelCase = torch.load(_lowerCAmelCase , map_location="""cpu""" ) __lowerCAmelCase = mam_aaa["args"] __lowerCAmelCase = mam_aaa["model"] __lowerCAmelCase = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(_lowerCAmelCase ) rename_keys(_lowerCAmelCase ) __lowerCAmelCase = state_dict["decoder.embed_tokens.weight"].shape[0] __lowerCAmelCase = args.share_decoder_input_output_embed __lowerCAmelCase = [int(_lowerCAmelCase ) for i in args.conv_kernel_sizes.split(""",""" )] __lowerCAmelCase = SpeechaTextConfig( vocab_size=_lowerCAmelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(_lowerCAmelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=_lowerCAmelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_lowerCAmelCase , num_beams=5 , max_length=200 , use_cache=_lowerCAmelCase , decoder_start_token_id=2 , early_stopping=_lowerCAmelCase , ) __lowerCAmelCase = SpeechaTextForConditionalGeneration(_lowerCAmelCase ) __lowerCAmelCase = model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0 and not set(_lowerCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f' but all the following weights are missing {missing}' ) if tie_embeds: __lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __lowerCAmelCase = lm_head_weights model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") _a : Dict = parser.parse_args() 
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
353
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : Dict = logging.get_logger(__name__) _a : Optional[int] = """▁""" _a : Any = {"""vocab_file""": """sentencepiece.bpe.model"""} _a : List[Any] = { """vocab_file""": { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model""" ), } } _a : Tuple = { """xlm-roberta-base""": 5_1_2, """xlm-roberta-large""": 5_1_2, """xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2, """xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2, """xlm-roberta-large-finetuned-conll03-english""": 5_1_2, """xlm-roberta-large-finetuned-conll03-german""": 5_1_2, } class _UpperCAmelCase ( lowerCAmelCase_ ): a : List[Any] =VOCAB_FILES_NAMES a : str =PRETRAINED_VOCAB_FILES_MAP a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Optional[Any] =["""input_ids""", """attention_mask"""] def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE="<mask>",__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = AddedToken(__SCREAMING_SNAKE_CASE,lstrip=__SCREAMING_SNAKE_CASE,rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else mask_token __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__SCREAMING_SNAKE_CASE,eos_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,sep_token=__SCREAMING_SNAKE_CASE,cls_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,mask_token=__SCREAMING_SNAKE_CASE,sp_model_kwargs=self.sp_model_kwargs,**__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowerCAmelCase = 1 __lowerCAmelCase = len(self.sp_model ) + self.fairseq_offset __lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): '''simple docstring''' __lowerCAmelCase = self.__dict__.copy() __lowerCAmelCase = None __lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = d # for backward compatibility if not hasattr(self,"""sp_model_kwargs""" ): __lowerCAmelCase = {} __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] __lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE,token_ids_a=__SCREAMING_SNAKE_CASE,already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase__ ( self ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' return self.sp_model.encode(__SCREAMING_SNAKE_CASE,out_type=__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowerCAmelCase = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE,""" """ ).strip() return out_string def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' if not os.path.isdir(__SCREAMING_SNAKE_CASE ): 
logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowerCAmelCase = os.path.join( __SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,__SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE,"""wb""" ) as fi: __lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
46
0
'''simple docstring''' import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa UpperCamelCase__ : str = logging.getLogger(__name__) class _UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A : Union[str, Any] = '''summarization''' _A : Optional[Any] = ['''loss'''] _A : Tuple = ROUGE_KEYS _A : int = '''rouge2''' def __init__( self : int , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[str] ): """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: __SCREAMING_SNAKE_CASE : Any = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , mode=self.mode , **lowerCAmelCase__ ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) __SCREAMING_SNAKE_CASE : int = Path(self.output_dir ) / """metrics.json""" __SCREAMING_SNAKE_CASE : Optional[Any] = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) __SCREAMING_SNAKE_CASE : Optional[int] = 0 __SCREAMING_SNAKE_CASE : List[Any] = defaultdict(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = self.config.model_type __SCREAMING_SNAKE_CASE : List[Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size __SCREAMING_SNAKE_CASE : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } __SCREAMING_SNAKE_CASE : List[Any] = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } __SCREAMING_SNAKE_CASE : Any = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} __SCREAMING_SNAKE_CASE : Any = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) __SCREAMING_SNAKE_CASE : Any = get_git_info()["""repo_sha"""] __SCREAMING_SNAKE_CASE : Any = hparams.num_workers 
__SCREAMING_SNAKE_CASE : Tuple = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang] __SCREAMING_SNAKE_CASE : Any = self.decoder_start_token_id __SCREAMING_SNAKE_CASE : Optional[int] = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset ) __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Any = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: __SCREAMING_SNAKE_CASE : Optional[int] = self.hparams.eval_max_gen_length else: __SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.max_length __SCREAMING_SNAKE_CASE : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def UpperCamelCase__ ( self : str , lowerCAmelCase__ : Dict[str, torch.Tensor] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(lowerCAmelCase__ , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) __SCREAMING_SNAKE_CASE : Optional[int] = True return readable_batch def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : Any , **lowerCAmelCase__ : Optional[Any] ): """simple docstring""" return self.model(lowerCAmelCase__ , **lowerCAmelCase__ ) def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode( lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) return lmap(str.strip , lowerCAmelCase__ ) def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.pad_token_id __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = batch["""input_ids"""], batch["""attention_mask"""] __SCREAMING_SNAKE_CASE : Tuple = batch["""labels"""] if isinstance(self.model , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : str = self.model._shift_right(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCAmelCase__ , lowerCAmelCase__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero __SCREAMING_SNAKE_CASE : Tuple = decoder_input_ids self.save_readable_batch(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = self(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id __SCREAMING_SNAKE_CASE : Tuple = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ ) assert lm_logits.shape[-1] == self.vocab_size __SCREAMING_SNAKE_CASE : List[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: __SCREAMING_SNAKE_CASE : List[Any] = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = label_smoothed_nll_loss( lowerCAmelCase__ , lowerCAmelCase__ , self.hparams.label_smoothing , ignore_index=lowerCAmelCase__ ) return (loss,) @property def UpperCamelCase__ ( 
self : List[Any] ): """simple docstring""" return self.tokenizer.pad_token_id def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._step(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = dict(zip(self.loss_names , lowerCAmelCase__ ) ) # tokens per batch __SCREAMING_SNAKE_CASE : Optional[int] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() __SCREAMING_SNAKE_CASE : str = batch["""input_ids"""].shape[0] __SCREAMING_SNAKE_CASE : str = batch["""input_ids"""].eq(self.pad ).sum() __SCREAMING_SNAKE_CASE : Optional[int] = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ): """simple docstring""" return self._generative_step(lowerCAmelCase__ ) def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]="val" ): """simple docstring""" self.step_count += 1 __SCREAMING_SNAKE_CASE : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} __SCREAMING_SNAKE_CASE : List[Any] = losses["""loss"""] __SCREAMING_SNAKE_CASE : int = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } __SCREAMING_SNAKE_CASE : List[Any] = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) __SCREAMING_SNAKE_CASE : torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = {F"{prefix}_avg_{k}": x for k, x in losses.items()} __SCREAMING_SNAKE_CASE : Optional[int] = self.step_count self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path __SCREAMING_SNAKE_CASE : int = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, F"{prefix}_loss": loss, F"{prefix}_{self.val_metric}": metric_tensor, } def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ): """simple docstring""" return calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') __SCREAMING_SNAKE_CASE : List[str] = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowerCAmelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = (time.time() - ta) / batch["""input_ids"""].shape[0] __SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(batch["""labels"""] ) __SCREAMING_SNAKE_CASE : Optional[Any] = self._step(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = dict(zip(self.loss_names , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Dict = self.calc_generative_metrics(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = np.mean(lmap(lowerCAmelCase__ , 
lowerCAmelCase__ ) ) base_metrics.update(gen_time=lowerCAmelCase__ , gen_len=lowerCAmelCase__ , preds=lowerCAmelCase__ , target=lowerCAmelCase__ , **lowerCAmelCase__ ) return base_metrics def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ): """simple docstring""" return self._generative_step(lowerCAmelCase__ ) def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : int ): """simple docstring""" return self.validation_epoch_end(lowerCAmelCase__ , prefix="""test""" ) def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.n_obs[type_path] __SCREAMING_SNAKE_CASE : str = self.target_lens[type_path] __SCREAMING_SNAKE_CASE : str = self.dataset_class( self.tokenizer , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , **self.dataset_kwargs , ) return dataset def UpperCamelCase__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dataset(lowerCAmelCase__ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": __SCREAMING_SNAKE_CASE : Optional[int] = dataset.make_sortish_sampler(lowerCAmelCase__ , distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": __SCREAMING_SNAKE_CASE : Any = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCAmelCase__ , batch_sampler=lowerCAmelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , ) def UpperCamelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase__ ) return dataloader def UpperCamelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def UpperCamelCase__ ( self : Tuple ): """simple docstring""" return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def UpperCamelCase__ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ): """simple docstring""" BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ ) add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ ) parser.add_argument( """--max_source_length""" , default=1_0_2_4 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=5_6 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCAmelCase__ ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCAmelCase__ ) parser.add_argument("""--max_tokens_per_batch""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ ) parser.add_argument("""--logger_name""" , type=lowerCAmelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=lowerCAmelCase__ , default=5_0_0 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=lowerCAmelCase__ , default="""summarization""" , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ ) parser.add_argument("""--src_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ ) parser.add_argument("""--tgt_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ ) parser.add_argument("""--eval_beams""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ ) parser.add_argument( """--val_metric""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So""" """ val_check_interval will effect it.""" ) , ) return parser class _UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A : List[Any] = '''translation''' _A : int = ['''loss'''] _A : Union[str, Any] = ['''bleu'''] _A : Dict = '''bleu''' def __init__( self : Any , lowerCAmelCase__ : int , **lowerCAmelCase__ : Any ): """simple docstring""" super().__init__(lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = hparams.src_lang __SCREAMING_SNAKE_CASE : Dict = hparams.tgt_lang def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ): """simple docstring""" return calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__ ) def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: str=None ): Path(args.output_dir ).mkdir(exist_ok=_lowerCamelCase ) check_output_dir(_lowerCamelCase , expected_items=3 ) if model is None: if "summarization" in args.task: __SCREAMING_SNAKE_CASE : SummarizationModule = SummarizationModule(_lowerCamelCase ) else: __SCREAMING_SNAKE_CASE : SummarizationModule = TranslationModule(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): __SCREAMING_SNAKE_CASE : str = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger __SCREAMING_SNAKE_CASE : Any = os.environ.get("""WANDB_PROJECT""" , _lowerCamelCase ) __SCREAMING_SNAKE_CASE : str = WandbLogger(name=model.output_dir.name , project=_lowerCamelCase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger __SCREAMING_SNAKE_CASE : Optional[int] = WandbLogger(name=model.output_dir.name , project=F"hf_{dataset}" ) if args.early_stopping_patience >= 0: __SCREAMING_SNAKE_CASE : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Dict = args.val_metric == """loss""" __SCREAMING_SNAKE_CASE : pl.Trainer = generic_train( _lowerCamelCase , _lowerCamelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , _lowerCamelCase ) , early_stopping_callback=_lowerCamelCase , logger=_lowerCamelCase , ) pickle_save(model.hparams , model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model __SCREAMING_SNAKE_CASE : Optional[int] = """""" __SCREAMING_SNAKE_CASE : Any = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=_lowerCamelCase ) ) if checkpoints: __SCREAMING_SNAKE_CASE : List[Any] = checkpoints[-1] __SCREAMING_SNAKE_CASE : str = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() UpperCamelCase__ : Dict = pl.Trainer.add_argparse_args(parser) UpperCamelCase__ : List[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) UpperCamelCase__ : List[str] = parser.parse_args() main(args)
112
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the net present value of the cash flows at the given per-period discount rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
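# A minimal usage sketch for present_value above (the cash-flow figures are illustrative):
# an initial outlay of 1000 followed by three inflows of 500, discounted at 10% per period.
npv = present_value(0.10, [-1000.0, 500.0, 500.0, 500.0])
print(npv)  # 243.43, so the investment has positive net present value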
112
1
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
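# A hypothetical invocation of the converter above (the script name and paths are
# placeholders, not from the source):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir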
356
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Return the Adler-32 checksum of ``plain_text``."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
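# A usage sketch for adler32 above; the expected checksum is the well-known Adler-32
# value of "Wikipedia" and can be cross-checked against the stdlib implementation:
import zlib

print(adler32("Wikipedia"))    # 300286872 (0x11E60398)
print(zlib.adler32(b"Wikipedia"))  # same value from zlib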
14
0
"""simple docstring""" import cmath import math def lowercase ( __snake_case : float , __snake_case : float , __snake_case : float , __snake_case : float ): lowercase_ : Optional[int] = math.radians(__snake_case ) lowercase_ : List[Any] = math.radians(__snake_case ) # Convert voltage and current to rectangular form lowercase_ : List[Any] = cmath.rect(__snake_case , __snake_case ) lowercase_ : int = cmath.rect(__snake_case , __snake_case ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
33
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase: Union[str, Any] = { "configuration_bridgetower": [ "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", ], "processing_bridgetower": ["BridgeTowerProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: Dict = ["BridgeTowerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: int = [ "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerModel", "BridgeTowerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys _lowercase: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
227
0
'''simple docstring'''
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return the term u(u-1)(u-2)...(u-p+1) used by Newton's forward difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
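# A quick check of ucal above: ucal(1.5, 3) = 1.5 * (1.5 - 1) * (1.5 - 2) = -0.375
print(ucal(1.5, 3))  # -0.375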
61
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0, else 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
61
1
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Return the grid area a*b whose rectangle count T(a)*T(b) is closest to ``target``."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
142
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
228
0
'''simple docstring'''
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
368
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in ``s`` at which ``pattern`` begins."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
147
0
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ) ) ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[list[list[float] | float]]: if dataset.ndim != value_array.ndim: snake_case_ = ( 'Wrong input data\'s dimensions... ' f'dataset : {dataset.ndim}, value_array : {value_array.ndim}' ) raise ValueError(UpperCAmelCase ) try: if dataset.shape[1] != value_array.shape[1]: snake_case_ = ( 'Wrong input data\'s shape... ' f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}' ) raise ValueError(UpperCAmelCase ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('Wrong shape' ) if dataset.dtype != value_array.dtype: snake_case_ = ( 'Input data have different datatype... ' f'dataset : {dataset.dtype}, value_array : {value_array.dtype}' ) raise TypeError(UpperCAmelCase ) snake_case_ = [] for value in value_array: snake_case_ = euclidean(UpperCAmelCase , dataset[0] ) snake_case_ = dataset[0].tolist() for dataset_value in dataset[1:]: snake_case_ = euclidean(UpperCAmelCase , UpperCAmelCase ) if dist > temp_dist: snake_case_ = temp_dist snake_case_ = dataset_value.tolist() answer.append([vector, dist] ) return answer def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return np.dot(UpperCAmelCase , UpperCAmelCase ) / (norm(UpperCAmelCase ) * norm(UpperCAmelCase )) if __name__ == "__main__": import doctest doctest.testmod()
69
"""simple docstring""" from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class lowercase : def __init__( self , lowercase , ) -> Optional[int]: lowerCAmelCase = parent lowerCAmelCase = 13 lowerCAmelCase = 7 lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = True lowerCAmelCase = 99 lowerCAmelCase = 32 lowerCAmelCase = 2 lowerCAmelCase = 4 lowerCAmelCase = 37 lowerCAmelCase = """gelu""" lowerCAmelCase = 0.1 lowerCAmelCase = 0.1 lowerCAmelCase = 512 lowerCAmelCase = 16 lowerCAmelCase = 2 lowerCAmelCase = 0.02 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = None def _snake_case ( self ) -> str: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: lowerCAmelCase = TFDistilBertModel(config=lowercase ) lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCAmelCase = model(lowercase ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: lowerCAmelCase = TFDistilBertForMaskedLM(config=lowercase ) lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCAmelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: lowerCAmelCase = TFDistilBertForQuestionAnswering(config=lowercase ) lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, } lowerCAmelCase = model(lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, 
self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: lowerCAmelCase = self.num_labels lowerCAmelCase = TFDistilBertForSequenceClassification(lowercase ) lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCAmelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any: lowerCAmelCase = self.num_choices lowerCAmelCase = TFDistilBertForMultipleChoice(lowercase ) lowerCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, } lowerCAmelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: lowerCAmelCase = self.num_labels lowerCAmelCase = TFDistilBertForTokenClassification(lowercase ) lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCAmelCase = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = self.prepare_config_and_inputs() ((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = config_and_inputs lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) _SCREAMING_SNAKE_CASE = ( { 'feature-extraction': TFDistilBertModel, 'fill-mask': TFDistilBertForMaskedLM, 'question-answering': TFDistilBertForQuestionAnswering, 'text-classification': TFDistilBertForSequenceClassification, 'token-classification': TFDistilBertForTokenClassification, 'zero-shot': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def _snake_case ( self ) -> Dict: lowerCAmelCase = TFDistilBertModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=lowercase , dim=37 ) def _snake_case ( self ) -> str: self.config_tester.run_common_tests() def _snake_case ( self ) -> int: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase ) def _snake_case ( self ) -> Any: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase ) def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase ) def _snake_case ( self ) -> List[Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase ) def _snake_case ( self ) -> str: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase ) @slow def _snake_case ( self ) -> List[str]: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): lowerCAmelCase = TFDistilBertModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class lowercase ( unittest.TestCase ): @slow def _snake_case ( self ) -> Any: lowerCAmelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase = model(lowercase )[0] lowerCAmelCase = [1, 6, 768] self.assertEqual(output.shape , lowercase ) lowerCAmelCase = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-4 )
46
0
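For reference, a runnable sketch of the nearest-neighbour search encoded in the first code field of the row above. `euclidean` is referenced by name in the sample itself; `similarity_search` and the local names are assumptions substituted for the masked placeholders:

import math
import numpy as np

def euclidean(a, b) -> float:
    # straight-line distance between two equal-length vectors
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    # for each query vector, keep the closest row of `dataset` and its distance
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer

print(similarity_search(np.array([[0.0, 0.0], [1.0, 1.0]]), np.array([[0.9, 1.1]])))
# -> [[[1.0, 1.0], 0.14142...]] (the query is nearest to [1.0, 1.0])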
"""simple docstring""" import argparse __A : Any = '''docs/source/_static/js/custom.js''' def A_ ( snake_case_ : Optional[Any] ): '''simple docstring''' with open(snake_case_ ,encoding="""utf-8""" ,newline="""\n""" ) as f: UpperCamelCase : Dict = f.readlines() UpperCamelCase : List[str] = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 UpperCamelCase : Any = f'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += f' "v{version}": "v{version}",\n' with open(snake_case_ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(snake_case_ ) if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') __A : Tuple = parser.parse_args() update_custom_js(args.version)
360
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowerCamelCase ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 88 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "geglu" , SCREAMING_SNAKE_CASE_ = None , ): super().__init__() UpperCamelCase : int = nn.ModuleList( [ TransformeraDModel( num_attention_heads=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , cross_attention_dim=SCREAMING_SNAKE_CASE_ , attention_bias=SCREAMING_SNAKE_CASE_ , sample_size=SCREAMING_SNAKE_CASE_ , num_vector_embeds=SCREAMING_SNAKE_CASE_ , activation_fn=SCREAMING_SNAKE_CASE_ , num_embeds_ada_norm=SCREAMING_SNAKE_CASE_ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference UpperCamelCase : Optional[Any] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` UpperCamelCase : List[Any] = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` UpperCamelCase : int = [1, 0] def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ): UpperCamelCase : Dict = hidden_states UpperCamelCase : Optional[Any] = [] UpperCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens UpperCamelCase : Optional[int] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] UpperCamelCase : str = self.transformer_index_for_condition[i] UpperCamelCase : Any = self.transformers[transformer_index]( SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , cross_attention_kwargs=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] UpperCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) UpperCamelCase : List[str] = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=SCREAMING_SNAKE_CASE_ )
27
0
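A minimal in-memory sketch of the version-bump logic in the code field of the row above, with the file I/O and argparse plumbing dropped. `version`, `lines`, and `index` are the names the sample's own usages point at; the sample content is illustrative:

version = "4.30.0"  # illustrative release number
lines = [
    'const stableVersion = "v4.29.0"\n',
    'const versionMapping = {\n',
    '    "v4.29.0": "v4.29.0",\n',
    '}\n',
]
index = 0
# point the stable version at the new release
while not lines[index].startswith('const stableVersion ='):
    index += 1
lines[index] = f'const stableVersion = "v{version}"\n'
# then append the release to the version table, just before the closing brace
while not lines[index].startswith('const versionMapping = {'):
    index += 1
while not lines[index].startswith('}'):
    index += 1
lines[index - 1] += f'    "v{version}": "v{version}",\n'
print(''.join(lines))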
from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __A( a ): def __init__( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM __a = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_snake_case , scheduler=_snake_case ) @torch.no_grad() def __call__( self , _snake_case = 1 , _snake_case = None , _snake_case = 0.0 , _snake_case = 50 , _snake_case = None , _snake_case = "pil" , _snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(self.unet.config.sample_size , _snake_case ): __a = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: __a = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(_snake_case , _snake_case ) and len(_snake_case ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(_snake_case )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) __a = randn_tensor(_snake_case , generator=_snake_case , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(_snake_case ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __a = self.unet(_snake_case , _snake_case ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __a = self.scheduler.step( _snake_case , _snake_case , _snake_case , eta=_snake_case , use_clipped_model_output=_snake_case , generator=_snake_case ).prev_sample __a = (image / 2 + 0.5).clamp(0 , 1 ) __a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a = self.numpy_to_pil(_snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=_snake_case )
6
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class UpperCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCAmelCase__ = JsonConfig def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''') A__ = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''') if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") A__ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCAmelCase__ , (str, list, tuple)): A__ = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})] A__ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files})) return splits def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) # We keep only the field we are interested in A__ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase__ , 
(list, tuple)): A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} else: A__ = dataset A__ = pa.Table.from_pydict(UpperCAmelCase__) yield file_idx, self._cast_table(UpperCAmelCase__) # If the file has one json object per line else: with open(UpperCAmelCase__ , '''rb''') as f: A__ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A__ = max(self.config.chunksize // 32 , 16 << 10) A__ = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A__ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase__) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''') try: while True: try: A__ = paj.read_json( io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase__ , pa.ArrowInvalid) and "straddling" not in str(UpperCAmelCase__) or block_size > len(UpperCAmelCase__) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON try: A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} A__ = pa.Table.from_pydict(UpperCAmelCase__) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(UpperCAmelCase__) break else: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__) batch_idx += 1
14
0
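The unconditional DDIM pipeline in the code field of the row above follows the standard diffusers calling convention; a usage sketch (DDIMPipeline is the upstream name for this class, and the checkpoint id here is illustrative):

import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # illustrative checkpoint
generator = torch.Generator().manual_seed(0)
# eta=0.0 makes the DDIM update deterministic; num_inference_steps maps onto set_timesteps
image = pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")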
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Dict = parent A : int = batch_size A : List[Any] = seq_length A : Optional[Any] = is_training A : Union[str, Any] = use_input_mask A : Any = use_token_type_ids A : List[Any] = use_labels A : List[str] = vocab_size A : int = hidden_size A : Optional[int] = num_hidden_layers A : List[str] = num_attention_heads A : Tuple = intermediate_size A : int = hidden_act A : str = hidden_dropout_prob A : Any = attention_probs_dropout_prob A : Tuple = max_position_embeddings A : List[str] = type_vocab_size A : Any = type_sequence_label_size A : int = initializer_range A : int = num_labels A : List[Any] = num_choices A : int = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = None if self.use_input_mask: A : Any = random_attention_mask([self.batch_size, self.seq_length] ) A : Any = None if self.use_token_type_ids: A : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Optional[int] = None A : str = None A : Optional[Any] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) A : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , use_stable_embedding=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: 
"""simple docstring""" A : Union[str, Any] = OpenLlamaModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : Any = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" A : Optional[Any] = True A : Optional[Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : Union[str, Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Any = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" A : str = True A : Dict = True A : Dict = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() # first forward pass A : Union[str, Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : str = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : int = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[str] 
= output_from_no_past[:, -3:, random_slice_idx].detach() A : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : str = self.prepare_config_and_inputs() ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Union[str, Any] = config_and_inputs A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __magic_name__ = (OpenLlamaForCausalLM,) if is_torch_available() else () __magic_name__ = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : List[Any] = OpenLlamaModelTester(self ) A : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A : int = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = 3 A : Optional[Any] = input_dict['''input_ids'''] A : Optional[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE ) A : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A : Union[str, Any] = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[str] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() A : List[str] = 3 A : int = '''single_label_classification''' A : Dict = input_dict['''input_ids'''] A : List[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE ) A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A : Tuple = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() 
A : Any = 3 A : Union[str, Any] = '''multi_label_classification''' A : Any = input_dict['''input_ids'''] A : Optional[int] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) A : Union[str, Any] = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() A : Optional[Any] = ids_tensor([1, 10] , config.vocab_size ) A : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A : Optional[Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE ) original_model.to(SCREAMING_SNAKE_CASE ) original_model.eval() A : Dict = original_model(SCREAMING_SNAKE_CASE ).last_hidden_state A : str = original_model(SCREAMING_SNAKE_CASE ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A : Dict = {'''type''': scaling_type, '''factor''': 10.0} A : List[str] = OpenLlamaModel(SCREAMING_SNAKE_CASE ) scaled_model.to(SCREAMING_SNAKE_CASE ) scaled_model.eval() A : Optional[Any] = scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state A : List[str] = scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) )
311
'''simple docstring''' from __future__ import annotations lowercase : Union[str, Any] = list[tuple[int, int]] lowercase : Optional[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" A : int = pos_x A : Optional[Any] = pos_y A : Optional[Any] = (pos_y, pos_x) A : str = goal_x A : Optional[int] = goal_y A : List[Any] = g_cost A : str = parent A : str = self.calculate_heuristic() def __lowerCAmelCase ( self ) -> float: """simple docstring""" A : Optional[int] = abs(self.pos_x - self.goal_x ) A : Optional[Any] = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE ) A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE ) A : Optional[Any] = [self.start] A : list[Node] = [] A : Tuple = False def __lowerCAmelCase ( self ) -> Path | None: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: A : Optional[int] = True return self.retrace_path(SCREAMING_SNAKE_CASE ) self.closed_nodes.append(SCREAMING_SNAKE_CASE ) A : Any = self.get_successors(SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: # retrieve the best current path A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]: """simple docstring""" A : List[Any] = [] for action in delta: A : List[str] = parent.pos_x + action[1] A : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path: """simple docstring""" A : int = node A : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowercase : Tuple = (0, 0) lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') lowercase : int = GreedyBestFirst(init, goal) lowercase : Union[str, Any] = greedy_bf.search() if path: for pos_x, pos_y in path: lowercase : Dict = 2 for elem in grid: print(elem)
311
1
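The search in the style-context field above is greedy best-first: open nodes are ordered purely by the Manhattan-distance heuristic, with no accumulated path cost in the ordering. A tiny standalone check of that heuristic, written with assumed names for the masked ones:

def manhattan(pos_x: int, pos_y: int, goal_x: int, goal_y: int) -> int:
    # mirrors Node.calculate_heuristic(): dx + dy to the goal cell
    return abs(pos_x - goal_x) + abs(pos_y - goal_y)

assert manhattan(0, 0, 6, 6) == 12  # start-to-goal estimate on the 7x7 demo grid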
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : torch.FloatTensor SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None def __a ( __lowerCamelCase, __lowerCamelCase=0.999, __lowerCamelCase="cosine", ): if alpha_transform_type == "cosine": def alpha_bar_fn(__lowerCamelCase ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__lowerCamelCase ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase_ : List[Any] = [] for i in range(__lowerCamelCase ): UpperCAmelCase_ : str = i / num_diffusion_timesteps UpperCAmelCase_ : Any = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__lowerCamelCase ) / alpha_bar_fn(__lowerCamelCase ), __lowerCamelCase ) ) return torch.tensor(__lowerCamelCase, dtype=torch.floataa ) class A_ (lowercase__ ,lowercase__ ): '''simple docstring''' @register_to_config def __init__( self , lowercase_ = 1000 , lowercase_ = "fixed_small_log" , lowercase_ = True , lowercase_ = 1.0 , lowercase_ = "epsilon" , lowercase_ = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) UpperCAmelCase_ : Any = betas_for_alpha_bar(lowercase_ ) UpperCAmelCase_ : str = 1.0 - self.betas UpperCAmelCase_ : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) UpperCAmelCase_ : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCAmelCase_ : Optional[Any] = 1.0 # setable values UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Dict = torch.from_numpy(np.arange(0 , lowercase_ )[::-1].copy() ) UpperCAmelCase_ : str = variance_type def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" return sample def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : Tuple = num_inference_steps UpperCAmelCase_ : List[str] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCAmelCase_ : Optional[Any] = (np.arange(0 , lowercase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCAmelCase_ : Optional[Any] = torch.from_numpy(lowercase_ ).to(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ): """simple docstring""" if prev_timestep is None: UpperCAmelCase_ : Optional[Any] = t - 1 UpperCAmelCase_ : Dict = self.alphas_cumprod[t] UpperCAmelCase_ : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase_ : List[Any] = 1 - alpha_prod_t UpperCAmelCase_ : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase_ : Union[str, Any] = self.betas[t] else: UpperCAmelCase_ : Optional[int] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase_ : 
List[str] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCAmelCase_ : List[Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCAmelCase_ : Tuple = torch.log(torch.clamp(lowercase_ , min=1E-2_0 ) ) UpperCAmelCase_ : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCAmelCase_ : Union[str, Any] = variance.log() UpperCAmelCase_ : int = beta.log() UpperCAmelCase_ : Any = (predicted_variance + 1) / 2 UpperCAmelCase_ : Tuple = frac * max_log + (1 - frac) * min_log return variance def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_=None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : int = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = torch.split(lowercase_ , sample.shape[1] , dim=1 ) else: UpperCAmelCase_ : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: UpperCAmelCase_ : int = t - 1 UpperCAmelCase_ : List[Any] = self.alphas_cumprod[t] UpperCAmelCase_ : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase_ : Tuple = 1 - alpha_prod_t UpperCAmelCase_ : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase_ : List[Any] = self.betas[t] UpperCAmelCase_ : str = self.alphas[t] else: UpperCAmelCase_ : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev UpperCAmelCase_ : Union[str, Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase_ : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase_ : int = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase_ : Any = torch.clamp( lowercase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCAmelCase_ : Tuple = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_ : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCAmelCase_ : Union[str, Any] = 0 if t > 0: UpperCAmelCase_ : str = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=lowercase_ , device=model_output.device ) UpperCAmelCase_ : str = self._get_variance( lowercase_ , predicted_variance=lowercase_ , prev_timestep=lowercase_ , ) if self.variance_type == "fixed_small_log": UpperCAmelCase_ : Optional[int] = variance elif self.variance_type == "learned_range": UpperCAmelCase_ : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." 
) UpperCAmelCase_ : Optional[int] = variance * variance_noise UpperCAmelCase_ : Any = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=lowercase_ , pred_original_sample=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" # Make sure alphas_cumprod and timestep have same device and dtype as original_samples UpperCAmelCase_ : List[str] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCAmelCase_ : Optional[int] = timesteps.to(original_samples.device ) UpperCAmelCase_ : List[str] = alphas_cumprod[timesteps] ** 0.5 UpperCAmelCase_ : str = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase_ : Dict = sqrt_alpha_prod.unsqueeze(-1 ) UpperCAmelCase_ : Tuple = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCAmelCase_ : Tuple = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase_ : List[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCAmelCase_ : Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
61
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=None , lowercase_=2 , ): """simple docstring""" UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : List[Any] = patch_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = hidden_size UpperCAmelCase_ : str = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : str = type_sequence_label_size UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : Union[str, Any] = scope UpperCAmelCase_ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCAmelCase_ : int = (image_size // patch_size) ** 2 UpperCAmelCase_ : Optional[Any] = num_patches + 2 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = DeiTModel(config=lowercase_ ) 
model.to(lowercase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = DeiTForMaskedImageModeling(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Optional[Any] = DeiTForMaskedImageModeling(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = model(lowercase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = self.type_sequence_label_size UpperCAmelCase_ : Union[str, Any] = DeiTForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : List[str] = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Optional[int] = DeiTForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[Any] = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Dict = config_and_inputs UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Tuple = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : List[Any] = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : List[str] = False def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = DeiTModelTester(self ) UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) 
UpperCAmelCase_ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowercase_ ) UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : str = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=False ): """simple docstring""" UpperCAmelCase_ : Tuple = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" if not self.model_tester.is_training: return UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue UpperCAmelCase_ : Optional[int] = model_class(lowercase_ ) model.to(lowercase_ ) model.train() UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) UpperCAmelCase_ : Dict = model(**lowercase_ ).loss loss.backward() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(lowercase_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue UpperCAmelCase_ : List[str] = model_class(lowercase_ ) model.gradient_checkpointing_enable() model.to(lowercase_ ) model.train() UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) UpperCAmelCase_ : Any = model(**lowercase_ ).loss loss.backward() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Dict = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, 
{"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase_ ), *get_values(lowercase_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): UpperCAmelCase_ : str = problem_type["title"] UpperCAmelCase_ : List[Any] = problem_type["num_labels"] UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.train() UpperCAmelCase_ : int = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if problem_type["num_labels"] > 1: UpperCAmelCase_ : List[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) UpperCAmelCase_ : Tuple = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase_ ) as warning_list: UpperCAmelCase_ : List[str] = model(**lowercase_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __a ( ): UpperCAmelCase_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A_ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( lowercase_ ) UpperCAmelCase_ : List[str] = self.default_image_processor UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : int = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowercase_ ) # verify the logits UpperCAmelCase_ : List[str] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) UpperCAmelCase_ : str = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) UpperCAmelCase_ : str = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = prepare_img() UpperCAmelCase_ : List[Any] = image_processor(images=lowercase_ , return_tensors="pt" ) UpperCAmelCase_ : List[str] = inputs.pixel_values.to(lowercase_ ) # forward pass to make sure inference works in 
fp16 with torch.no_grad(): UpperCAmelCase_ : int = model(lowercase_ )
61
1
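Editor's note: the DeiT test above exercises the public checkpoint facebook/deit-base-distilled-patch16-224. The following is a minimal, self-contained inference sketch against that checkpoint; it assumes network access to the Hub, and "cats.png" is a placeholder input path, not a file from the original tests.

import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

image = Image.open("cats.png").convert("RGB")  # placeholder path; any RGB image works
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
print(model.config.id2label[logits.argmax(-1).item()])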
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = { """configuration_informer""": [ """INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """InformerForPrediction""", """InformerModel""", """InformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
100
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 _a = get_tests_dir("""fixtures/dummy-config.json""") class _UpperCAmelCase( unittest.TestCase ): def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = 0 def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__) self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''')) def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = AutoConfig.from_pretrained('''bert-base-uncased''') self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = AutoConfig.from_pretrained(__a) self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = AutoConfig.from_pretrained(__a) self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = AutoConfig.for_model('''roberta''') self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
_UpperCamelCase = os.path.join(__a , '''fake-roberta''') os.makedirs(__a , exist_ok=__a) with open(os.path.join(__a , '''config.json''') , '''w''') as f: f.write(json.dumps({})) _UpperCamelCase = AutoConfig.from_pretrained(__a) self.assertEqual(type(__a) , __a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' try: AutoConfig.register('''custom''' , __a) # Wrong model type will raise an error with self.assertRaises(__a): AutoConfig.register('''model''' , __a) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a): AutoConfig.register('''bert''' , __a) # Now that the config is registered, it can be used as any other config with the auto-API _UpperCamelCase = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a) _UpperCamelCase = AutoConfig.from_pretrained(__a) self.assertIsInstance(__a , __a) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( __a , '''bert-base is not a local folder and is not a valid model identifier'''): _UpperCamelCase = AutoConfig.from_pretrained('''bert-base''') def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( __a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''): _UpperCamelCase = AutoConfig.from_pretrained(__a , revision='''aaaaaa''') def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex( __a , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ): _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''') def UpperCAmelCase ( self) -> int: '''simple docstring''' # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a): _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''') # If remote code is disabled, we can't load this config. with self.assertRaises(__a): _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__a) _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__a) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''') # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a) _UpperCamelCase = AutoConfig.from_pretrained(__a , trust_remote_code=__a) self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''') def UpperCAmelCase ( self) -> Any: '''simple docstring''' class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 'new-model' try: AutoConfig.register('''new-model''' , __a) # If remote code is not set, the default is to use local _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''') self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''') # If remote code is disabled, we load the local one. 
_UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__a) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''') # If remote is enabled, we load from the Hub _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__a) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''') finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
100
1
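Editor's note: the AutoConfig tests above revolve around registering a custom configuration class with the auto-API. This sketch shows the same registration and round-trip flow outside the test harness; ToyConfig and the "toy-model" type are illustrative names, not part of the library.

import tempfile

from transformers import AutoConfig, PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy-model"  # must match the key passed to AutoConfig.register

AutoConfig.register("toy-model", ToyConfig)

config = AutoConfig.for_model("toy-model")
assert isinstance(config, ToyConfig)

# Once registered, the config also survives a save/reload round trip.
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = AutoConfig.from_pretrained(tmp_dir)
assert isinstance(reloaded, ToyConfig)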
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase = { '''yjernite/retribert-base-uncased''': 512, } _lowerCAmelCase = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = VOCAB_FILES_NAMES __lowercase : int = PRETRAINED_VOCAB_FILES_MAP __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : str = PRETRAINED_INIT_CONFIGURATION __lowercase : str = RetriBertTokenizer __lowercase : str = ['''input_ids''', '''attention_mask'''] def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase="[UNK]" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="[PAD]" ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[MASK]" ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Tuple: super().__init__( __UpperCAmelCase ,tokenizer_file=__UpperCAmelCase ,do_lower_case=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,tokenize_chinese_chars=__UpperCAmelCase ,strip_accents=__UpperCAmelCase ,**__UpperCAmelCase ,) lowerCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,__UpperCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,__UpperCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,__UpperCAmelCase ) != tokenize_chinese_chars ): lowerCAmelCase__ : Optional[Any] = getattr(__UpperCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCAmelCase__ : Union[str, Any] = do_lower_case lowerCAmelCase__ : List[Any] = strip_accents lowerCAmelCase__ : Optional[int] = tokenize_chinese_chars lowerCAmelCase__ : Optional[int] = normalizer_class(**__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = do_lower_case def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> List[Any]: lowerCAmelCase__ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]: lowerCAmelCase__ : int = [self.sep_token_id] lowerCAmelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: lowerCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase ,name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
37
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return the ``torch.nn`` activation module matching ``act_fn``."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
147
0
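Editor's note: a short, self-contained check of the activation-lookup helper repaired above; the helper is restated here so the block runs on its own, and the SiLU identity is used as the correctness check.

import torch
from torch import nn

def get_activation(act_fn: str) -> nn.Module:  # mirrors the helper above
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    if act_fn == "mish":
        return nn.Mish()
    if act_fn == "gelu":
        return nn.GELU()
    raise ValueError(f"Unsupported activation function: {act_fn}")

x = torch.linspace(-2.0, 2.0, steps=5)
assert torch.allclose(get_activation("silu")(x), x * torch.sigmoid(x))  # SiLU(x) == x * sigmoid(x)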
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
251
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_): @register_to_config def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ = False , )-> Optional[Any]: '''simple docstring''' super().__init__() UpperCamelCase = nn.Embedding(A_ , A_ ) UpperCamelCase = nn.Embedding(A_ , A_ ) UpperCamelCase = False UpperCamelCase = nn.Dropout(p=A_ ) UpperCamelCase = TaConfig( vocab_size=A_ , d_model=A_ , num_heads=A_ , d_kv=A_ , d_ff=A_ , dropout_rate=A_ , feed_forward_proj=A_ , is_decoder=A_ , is_encoder_decoder=A_ , ) UpperCamelCase = nn.ModuleList() for lyr_num in range(A_ ): UpperCamelCase = TaBlock(A_ ) self.encoders.append(A_ ) UpperCamelCase = TaLayerNorm(A_ ) UpperCamelCase = nn.Dropout(p=A_ ) def UpperCAmelCase_ ( self , A_ , A_ )-> Tuple: '''simple docstring''' UpperCamelCase = self.token_embedder(A_ ) UpperCamelCase = encoder_input_tokens.shape[1] UpperCamelCase = torch.arange(A_ , device=encoder_input_tokens.device ) x += self.position_encoding(A_ ) UpperCamelCase = self.dropout_pre(A_ ) # inverted the attention mask UpperCamelCase = encoder_input_tokens.size() UpperCamelCase = self.get_extended_attention_mask(A_ , A_ ) for lyr in self.encoders: UpperCamelCase = lyr(A_ , A_ )[0] UpperCamelCase = self.layer_norm(A_ ) return self.dropout_post(A_ ), encoder_inputs_mask
251
1
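Editor's note: the lazy module above exposes SpeechEncoderDecoderModel; this hedged sketch shows its documented composition entry point. Both checkpoint names are common public examples, downloads happen on first use, and the decoder's cross-attention weights are newly initialized rather than pretrained.

from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h",  # speech encoder
    "bert-base-uncased",            # text decoder; cross-attention is added on the fly
)
print(type(model.encoder).__name__, type(model.decoder).__name__)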
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[Any] = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys a__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
54
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : int = int(number**0.5 ) return number == sq * sq def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __a : int = x_den * y_den * z_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ): __a : set = set() __a : int __a : Fraction = Fraction(0 ) __a : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 __a : Union[str, Any] = x_num * y_den + x_den * y_num __a : Optional[Any] = x_den * y_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : Optional[int] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __a : Union[str, Any] = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[Any] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 __a : int = x_num * y_num __a : Optional[Any] = x_den * y_num + x_num * y_den __a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : List[Any] = x_num * x_num * y_num * y_num __a : List[Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[str] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
27
0
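Editor's note: the Project Euler-style search above repeatedly adds two fractions and reduces the result by the gcd. This standalone sketch names that step explicitly and cross-checks it against fractions.Fraction from the standard library.

from fractions import Fraction
from math import gcd

def add_reduced(x_num: int, x_den: int, y_num: int, y_den: int) -> tuple[int, int]:
    top = x_num * y_den + x_den * y_num  # numerator of the sum
    bottom = x_den * y_den               # denominator of the sum
    hcf = gcd(top, bottom)
    return top // hcf, bottom // hcf

assert add_reduced(1, 2, 1, 3) == (5, 6)                   # 1/2 + 1/3 = 5/6
assert Fraction(1, 2) + Fraction(1, 3) == Fraction(5, 6)   # library cross-check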
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def __lowerCamelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = SwinConfig(image_size=1_9_2 ) if "base" in model_name: lowerCAmelCase__ = 6 lowerCAmelCase__ = 1_2_8 lowerCAmelCase__ = (2, 2, 1_8, 2) lowerCAmelCase__ = (4, 8, 1_6, 3_2) elif "large" in model_name: lowerCAmelCase__ = 1_2 lowerCAmelCase__ = 1_9_2 lowerCAmelCase__ = (2, 2, 1_8, 2) lowerCAmelCase__ = (6, 1_2, 2_4, 4_8) else: raise ValueError('Model not supported, only supports base and large variants' ) lowerCAmelCase__ = window_size lowerCAmelCase__ = embed_dim lowerCAmelCase__ = depths lowerCAmelCase__ = num_heads return config def __lowerCamelCase ( lowerCAmelCase__ ): if "encoder.mask_token" in name: lowerCAmelCase__ = name.replace('encoder.mask_token' , 'embeddings.mask_token' ) if "encoder.patch_embed.proj" in name: lowerCAmelCase__ = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "encoder.patch_embed.norm" in name: lowerCAmelCase__ = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' ) if "attn.proj" in name: lowerCAmelCase__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCAmelCase__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCAmelCase__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCAmelCase__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCAmelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCAmelCase__ = name.replace('mlp.fc2' , 'output.dense' ) if name == "encoder.norm.weight": lowerCAmelCase__ = 'layernorm.weight' if name == "encoder.norm.bias": lowerCAmelCase__ = 'layernorm.bias' if "decoder" in name: pass else: lowerCAmelCase__ = 'swin.' + name return name def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): for key in orig_state_dict.copy().keys(): lowerCAmelCase__ = orig_state_dict.pop(lowerCAmelCase__ ) if "attn_mask" in key: pass elif "qkv" in key: lowerCAmelCase__ = key.split('.' ) lowerCAmelCase__ = int(key_split[2] ) lowerCAmelCase__ = int(key_split[4] ) lowerCAmelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCAmelCase__ = val[:dim, :] lowerCAmelCase__ = val[ dim : dim * 2, : ] lowerCAmelCase__ = val[-dim:, :] else: lowerCAmelCase__ = val[ :dim ] lowerCAmelCase__ = val[ dim : dim * 2 ] lowerCAmelCase__ = val[ -dim: ] else: lowerCAmelCase__ = val return orig_state_dict def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = torch.load(lowerCAmelCase__ , map_location='cpu' )['model'] lowerCAmelCase__ = get_swin_config(lowerCAmelCase__ ) lowerCAmelCase__ = SwinForMaskedImageModeling(lowerCAmelCase__ ) model.eval() lowerCAmelCase__ = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) model.load_state_dict(lowerCAmelCase__ ) lowerCAmelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase__ = ViTImageProcessor(size={'height': 1_9_2, 'width': 1_9_2} ) lowerCAmelCase__ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ) lowerCAmelCase__ = image_processor(images=lowerCAmelCase__ , return_tensors='pt' ) with torch.no_grad(): lowerCAmelCase__ = model(**lowerCAmelCase__ ).logits print(outputs.keys() ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) lowerCAmelCase__ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
119
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort ``numbers`` in place by pairwise exchanges and return the list."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
119
1
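Editor's note: the Swin SimMIM conversion script above splits each fused qkv projection into separate query/key/value tensors by row slicing. This sketch reproduces the same slicing on a dummy weight so the row layout is easy to verify; `dim` plays the role of the script's all_head_size.

import torch

dim = 4  # per-projection output size (all_head_size in the script above)
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

q = qkv_weight[:dim, :]           # first block of rows -> query
k = qkv_weight[dim : dim * 2, :]  # middle block -> key
v = qkv_weight[-dim:, :]          # last block -> value

assert torch.equal(torch.cat([q, k, v]), qkv_weight)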
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : List[str] = parent UpperCAmelCase : Dict = batch_size UpperCAmelCase : Any = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : str = use_labels UpperCAmelCase : Any = vocab_size UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : Optional[Any] = hidden_dropout_prob UpperCAmelCase : Tuple = attention_probs_dropout_prob UpperCAmelCase : Any = max_position_embeddings UpperCAmelCase : Union[str, Any] = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : List[str] = initializer_range UpperCAmelCase : int = num_labels UpperCAmelCase : int = num_choices UpperCAmelCase : Any = scope def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : str = None if self.use_input_mask: UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Tuple = None if self.use_token_type_ids: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : str = None UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self ): '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , 
use_stable_embedding=snake_case , ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Union[str, Any] = model(snake_case , attention_mask=snake_case ) UpperCAmelCase : List[Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[Any] = True UpperCAmelCase : List[str] = OpenLlamaModel(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Union[str, Any] = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , ) UpperCAmelCase : str = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , ) UpperCAmelCase : Optional[Any] = model(snake_case , attention_mask=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : List[str] = True UpperCAmelCase : Any = True UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=snake_case ) model.to(snake_case ) model.eval() # first forward pass UpperCAmelCase : str = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , ) UpperCAmelCase : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : List[Any] = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )["hidden_states"][0] UpperCAmelCase : int = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["hidden_states"][0] # select random slice UpperCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case , 
snake_case , atol=1e-3 ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : Tuple = config_and_inputs UpperCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : str = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Optional[int] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = OpenLlamaModelTester(self ) UpperCAmelCase : Dict = ConfigTester(self , config_class=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase : str = type self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[int] = 3 UpperCAmelCase : Union[str, Any] = input_dict["input_ids"] UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(snake_case ) UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase : List[str] = OpenLlamaForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Any = 3 UpperCAmelCase : Tuple = "single_label_classification" UpperCAmelCase : Any = input_dict["input_ids"] UpperCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(snake_case ) UpperCAmelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase : str = OpenLlamaForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = 3 UpperCAmelCase : Dict = "multi_label_classification" UpperCAmelCase : 
Optional[Any] = input_dict["input_ids"] UpperCAmelCase : Tuple = input_ids.ne(1 ).to(snake_case ) UpperCAmelCase : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase : Any = OpenLlamaForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" ) def A_ ( self ): '''simple docstring''' pass @parameterized.expand([("linear",), ("dynamic",)] ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Any = ids_tensor([1, 1_0] , config.vocab_size ) UpperCAmelCase : List[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : List[Any] = OpenLlamaModel(snake_case ) original_model.to(snake_case ) original_model.eval() UpperCAmelCase : int = original_model(snake_case ).last_hidden_state UpperCAmelCase : Union[str, Any] = original_model(snake_case ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = {"type": scaling_type, "factor": 10.0} UpperCAmelCase : List[str] = OpenLlamaModel(snake_case ) scaled_model.to(snake_case ) scaled_model.eval() UpperCAmelCase : Optional[int] = scaled_model(snake_case ).last_hidden_state UpperCAmelCase : Union[str, Any] = scaled_model(snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
311
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and 
[ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
311
1
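Editor's note: the parameterized test above toggles RoPE scaling on a tiny Open-Llama model. This sketch shows the config knob involved; it assumes the installed transformers version still exports the (since-deprecated) Open-Llama classes, and all sizes are arbitrary toy values, so no weights are downloaded.

import torch
from transformers import OpenLlamaConfig, OpenLlamaModel

config = OpenLlamaConfig(
    vocab_size=64,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    max_position_embeddings=128,
    rope_scaling={"type": "linear", "factor": 2.0},  # the knob the test sweeps
)
model = OpenLlamaModel(config).eval()  # randomly initialized
with torch.no_grad():
    hidden = model(torch.randint(0, 64, (1, 10))).last_hidden_state
print(hidden.shape)  # torch.Size([1, 10, 32])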
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging A_ : List[Any] = logging.get_logger(__name__) class _a : '''simple docstring''' UpperCAmelCase__: str UpperCAmelCase__: str = None @staticmethod def __A ( ): raise NotImplementedError def __A ( self , A__ , A__ , A__ , **A__ ): raise NotImplementedError def __A ( self , A__ ): raise NotImplementedError def __A ( self ): if not self.is_available(): raise RuntimeError( F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def __A ( cls ): return F"""`pip install {cls.pip_package or cls.name}`""" class _a (__magic_name__ ): '''simple docstring''' UpperCAmelCase__: Optional[Any] = '''optuna''' @staticmethod def __A ( ): return is_optuna_available() def __A ( self , A__ , A__ , A__ , **A__ ): return run_hp_search_optuna(A__ , A__ , A__ , **A__ ) def __A ( self , A__ ): return default_hp_space_optuna(A__ ) class _a (__magic_name__ ): '''simple docstring''' UpperCAmelCase__: str = '''ray''' UpperCAmelCase__: Optional[Any] = '''\'ray[tune]\'''' @staticmethod def __A ( ): return is_ray_available() def __A ( self , A__ , A__ , A__ , **A__ ): return run_hp_search_ray(A__ , A__ , A__ , **A__ ) def __A ( self , A__ ): return default_hp_space_ray(A__ ) class _a (__magic_name__ ): '''simple docstring''' UpperCAmelCase__: Optional[int] = '''sigopt''' @staticmethod def __A ( ): return is_sigopt_available() def __A ( self , A__ , A__ , A__ , **A__ ): return run_hp_search_sigopt(A__ , A__ , A__ , **A__ ) def __A ( self , A__ ): return default_hp_space_sigopt(A__ ) class _a (__magic_name__ ): '''simple docstring''' UpperCAmelCase__: str = '''wandb''' @staticmethod def __A ( ): return is_wandb_available() def __A ( self , A__ , A__ , A__ , **A__ ): return run_hp_search_wandb(A__ , A__ , A__ , **A__ ) def __A ( self , A__ ): return default_hp_space_wandb(A__ ) A_ : Optional[int] = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCamelCase () -> str: A__ : Optional[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowercase_ ) > 0: A__ : Union[str, Any] = available_backends[0].name if len(lowercase_ ) > 1: logger.info( f"""{len(lowercase_ )} hyperparameter search backends available. Using {name} as the default.""" ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
141
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
141
1
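Editor's note: a small usage sketch for the Falcon configuration reconstructed above. The printed values follow from the class defaults (7B-style sizes), and the property names match the snippet; a recent transformers release that exports FalconConfig is assumed.

from transformers import FalconConfig

config = FalconConfig()     # defaults: hidden_size=4544, num_attention_heads=71
print(config.head_dim)      # hidden_size // num_attention_heads == 64
print(config.rotary)        # True: rotary embeddings are used whenever alibi is off
print(config.num_kv_heads)  # falls back to num_attention_heads when not set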
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( __a , __a , unittest.TestCase ): """simple docstring""" __lowercase : Union[str, Any] = StableDiffusionXLImgaImgPipeline __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} __lowercase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} __lowercase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowercase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowercase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case_ ( self): torch.manual_seed(0) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , ) __SCREAMING_SNAKE_CASE = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=3_2 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = CLIPTextModelWithProjection(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0): __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = image / 2 + 0.5 if str(lowerCAmelCase__).startswith("""mps"""): 
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = sd_pipe(**lowerCAmelCase__).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) __SCREAMING_SNAKE_CASE = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def snake_case_ ( self): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3) def snake_case_ ( self): super().test_inference_batch_single_identical(expected_max_diff=3E-3) def snake_case_ ( self): pass def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) # forward without prompt embeds __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = 3 * ["""this is a negative prompt"""] __SCREAMING_SNAKE_CASE = negative_prompt __SCREAMING_SNAKE_CASE = 3 * [inputs["""prompt"""]] __SCREAMING_SNAKE_CASE = sd_pipe(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = output.images[0, -3:, -3:, -1] # forward with prompt embeds __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = 3 * ["""this is a negative prompt"""] __SCREAMING_SNAKE_CASE = 3 * [inputs.pop("""prompt""")] ( ( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) , ) = sd_pipe.encode_prompt(lowerCAmelCase__ , negative_prompt=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = sd_pipe( **lowerCAmelCase__ , prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , pooled_prompt_embeds=lowerCAmelCase__ , negative_pooled_prompt_embeds=lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__="cpu" , lowerCAmelCase__=torch.floataa , lowerCAmelCase__=0): __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = np.random.RandomState(lowerCAmelCase__).standard_normal((1, 4, 6_4, 6_4)) __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCAmelCase__).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = { """prompt""": """a photograph of an 
astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def snake_case_ ( self): __SCREAMING_SNAKE_CASE = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""") pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.get_inputs(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) __SCREAMING_SNAKE_CASE = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06]) assert np.abs(image_slice - expected_slice).max() < 7E-3
100
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Tuple = '''dandelin/vilt-b32-finetuned-vqa''' __lowercase : str = ( '''This is a tool that answers a question about an image. It takes an input named `image` which should be the ''' '''image containing the information, as well as a `question` which should be the question in English. It ''' '''returns a text that is the answer to the question.''' ) __lowercase : Optional[int] = '''image_qa''' __lowercase : Union[str, Any] = AutoProcessor __lowercase : Optional[Any] = AutoModelForVisualQuestionAnswering __lowercase : List[str] = ['''image''', '''text'''] __lowercase : Optional[Any] = ['''text'''] def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__): requires_backends(self , ["""vision"""]) super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): return self.pre_processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""") def snake_case_ ( self , lowerCAmelCase__): with torch.no_grad(): return self.model(**lowerCAmelCase__).logits def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = outputs.argmax(-1).item() return self.model.config.idalabel[idx]
100
1
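Editor's note: the test class above targets the SDXL image-to-image pipeline; this hedged end-to-end sketch uses the public refiner checkpoint. It requires a CUDA GPU and a Hub download, "init.png" is a placeholder input path, and the step count is an illustrative value rather than one taken from the tests.

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("init.png")  # placeholder path
image = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=init_image,
    strength=0.75,           # same strength the dummy test uses
    num_inference_steps=30,  # illustrative value
).images[0]
image.save("out.png")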
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Optional[int] = logging.get_logger(__name__) def UpperCamelCase ( __magic_name__ : Tuple=None , __magic_name__ : List[Any]=None ) -> Union[str, Any]: """simple docstring""" return field(default_factory=lambda: default , metadata=__magic_name__ ) @dataclass class A : '''simple docstring''' A__ = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) A__ = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) A__ = list_field( default=[8, 32, 1_28, 5_12] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) A__ = field( default=UpperCAmelCase__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) A__ = field( default=UpperCAmelCase__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) A__ = field( default=UpperCAmelCase__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Benchmark training of model'''} ) A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Verbose memory tracing'''} ) A__ = field( default=UpperCAmelCase__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) A__ = field( default=UpperCAmelCase__ , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Trace memory line by line'''} ) A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Save result to a CSV file'''} ) A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Save all print statements in a log file'''} ) A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Whether to print environment information'''} ) A__ = field( default=UpperCAmelCase__ , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) A__ = field( default=F"""inference_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) A__ = field( default=F"""inference_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) A__ = field( default=F"""train_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) A__ = field( default=F"""train_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) A__ = field( default=F"""env_info_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) A__ = field( default=F"""log_{round(time() )}.csv""" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) A__ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) A__ = field( default=UpperCAmelCase__ , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def lowerCamelCase__ (self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , _UpperCAmelCase , ) def lowerCamelCase__ (self : int ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def lowerCamelCase__ (self : Optional[int] ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def lowerCamelCase__ (self : Dict ) -> Optional[int]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
368
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = 42 A__ = 42 def __init__(self : Union[str, Any] , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : ScoreSdeVeScheduler ) -> List[str]: """simple docstring""" super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__(self : Optional[Any] , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 2000 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" lowercase__ = self.unet.config.sample_size lowercase__ = (batch_size, 3, img_size, img_size) lowercase__ = self.unet lowercase__ = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase ) * self.scheduler.init_noise_sigma lowercase__ = sample.to(self.device ) self.scheduler.set_timesteps(_UpperCAmelCase ) self.scheduler.set_sigmas(_UpperCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowercase__ = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample lowercase__ = self.scheduler.step_correct(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample # prediction step lowercase__ = model(_UpperCAmelCase , _UpperCAmelCase ).sample lowercase__ = self.scheduler.step_pred(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ) lowercase__ , lowercase__ = output.prev_sample, output.prev_sample_mean lowercase__ = sample_mean.clamp(0 , 1 ) lowercase__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=_UpperCAmelCase )
146
0
'''simple docstring''' from itertools import product def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = sides_number SCREAMING_SNAKE_CASE : str = max_face_number * dice_number SCREAMING_SNAKE_CASE : Tuple = [0] * (max_total + 1) SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : int = range(__UpperCamelCase ,max_face_number + 1 ) for dice_numbers in product(__UpperCamelCase ,repeat=__UpperCamelCase ): SCREAMING_SNAKE_CASE : Any = sum(__UpperCamelCase ) totals_frequencies[total] += 1 return totals_frequencies def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = total_frequency_distribution( sides_number=4 ,dice_number=9 ) SCREAMING_SNAKE_CASE : str = total_frequency_distribution( sides_number=6 ,dice_number=6 ) SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = 9 SCREAMING_SNAKE_CASE : Dict = 4 * 9 SCREAMING_SNAKE_CASE : str = 6 for peter_total in range(__UpperCamelCase ,max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) SCREAMING_SNAKE_CASE : Optional[Any] = (4**9) * (6**6) SCREAMING_SNAKE_CASE : int = peter_wins_count / total_games_number SCREAMING_SNAKE_CASE : Optional[Any] = round(__UpperCamelCase ,ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
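The sample above is the Project Euler 205 computation: it tabulates how often each total appears when rolling identical dice, then compares Peter's nine four-sided dice against Colin's six six-sided dice. A minimal self-contained sketch of the same computation, with descriptive names of my own rather than the sample's mangled ones:

from itertools import product

def total_frequencies(sides: int, dice: int) -> list[int]:
    # freq[t] = number of ways the dice can sum to t
    freq = [0] * (sides * dice + 1)
    for roll in product(range(1, sides + 1), repeat=dice):
        freq[sum(roll)] += 1
    return freq

peter = total_frequencies(4, 9)  # nine four-sided dice
colin = total_frequencies(6, 6)  # six six-sided dice

# Peter wins exactly when his total strictly exceeds Colin's
wins = sum(peter[t] * sum(colin[:t]) for t in range(len(peter)))
print(round(wins / (4**9 * 6**6), 7))  # 0.5731441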
251
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowercase__( __UpperCamelCase: Union[dict, list, tuple, torch.Tensor] ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] if isinstance(__UpperCamelCase ,__UpperCamelCase ): for v in tree.values(): shapes.extend(_fetch_dims(__UpperCamelCase ) ) elif isinstance(__UpperCamelCase ,(list, tuple) ): for t in tree: shapes.extend(_fetch_dims(__UpperCamelCase ) ) elif isinstance(__UpperCamelCase ,torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('Not supported' ) return shapes @torch.jit.ignore def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Tuple[int, ...] ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [] for d in reversed(__UpperCamelCase ): idx.append(flat_idx % d ) SCREAMING_SNAKE_CASE : Tuple = flat_idx // d return tuple(reversed(__UpperCamelCase ) ) @torch.jit.ignore def lowercase__( __UpperCamelCase: Sequence[int] ,__UpperCamelCase: Sequence[int] ,__UpperCamelCase: Sequence[int] ,__UpperCamelCase: Optional[Sequence[bool]] = None ,__UpperCamelCase: Optional[Sequence[bool]] = None ,): """simple docstring""" def reduce_edge_list(__UpperCamelCase: List[bool] ) -> None: SCREAMING_SNAKE_CASE : List[str] = True for i in range(len(__UpperCamelCase ) ): SCREAMING_SNAKE_CASE : Optional[Any] = -1 * (i + 1) l[reversed_idx] &= tally SCREAMING_SNAKE_CASE : str = l[reversed_idx] if start_edges is None: SCREAMING_SNAKE_CASE : int = [s == 0 for s in start] reduce_edge_list(__UpperCamelCase ) if end_edges is None: SCREAMING_SNAKE_CASE : Tuple = [e == (d - 1) for e, d in zip(__UpperCamelCase ,__UpperCamelCase )] reduce_edge_list(__UpperCamelCase ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(__UpperCamelCase ) == 0: return [()] elif len(__UpperCamelCase ) == 1: return [(slice(start[0] ,end[0] + 1 ),)] SCREAMING_SNAKE_CASE : List[Tuple[slice, ...]] = [] SCREAMING_SNAKE_CASE : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(__UpperCamelCase ,__UpperCamelCase ): if s == e: path_list.append(slice(__UpperCamelCase ,s + 1 ) ) else: break SCREAMING_SNAKE_CASE : Tuple[slice, ...] 
= tuple(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = len(__UpperCamelCase ) # start == end, and we're done if divergence_idx == len(__UpperCamelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None SCREAMING_SNAKE_CASE : List[str] = start[divergence_idx] return tuple( path + (slice(__UpperCamelCase ,sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None SCREAMING_SNAKE_CASE : List[Any] = end[divergence_idx] return tuple( path + (slice(__UpperCamelCase ,edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) SCREAMING_SNAKE_CASE : List[str] = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowercase__( __UpperCamelCase: torch.Tensor ,__UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: int ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = t.shape[:no_batch_dims] SCREAMING_SNAKE_CASE : str = list(_flat_idx_to_idx(__UpperCamelCase ,__UpperCamelCase ) ) # _get_minimal_slice_set is inclusive SCREAMING_SNAKE_CASE : int = list(_flat_idx_to_idx(flat_end - 1 ,__UpperCamelCase ) ) # Get an ordered list of slices to perform SCREAMING_SNAKE_CASE : str = _get_minimal_slice_set( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) SCREAMING_SNAKE_CASE : Dict = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowercase__( __UpperCamelCase: Callable ,__UpperCamelCase: Dict[str, Any] ,__UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: bool = False ,__UpperCamelCase: Any = None ,__UpperCamelCase: bool = False ,): """simple docstring""" if not (len(__UpperCamelCase ) > 0): raise ValueError('Must provide at least one input' ) SCREAMING_SNAKE_CASE : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(__UpperCamelCase )] SCREAMING_SNAKE_CASE : Optional[int] = tuple([max(__UpperCamelCase ) for s in zip(*__UpperCamelCase )] ) def _prep_inputs(__UpperCamelCase: torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: SCREAMING_SNAKE_CASE : str = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) SCREAMING_SNAKE_CASE : str = t.reshape(-1 ,*t.shape[no_batch_dims:] ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_prep_inputs ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = None if _out is not None: SCREAMING_SNAKE_CASE : Tuple = tensor_tree_map(lambda __UpperCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) ,_out ) SCREAMING_SNAKE_CASE : List[Any] = 1 for d in orig_batch_dims: flat_batch_dim *= d SCREAMING_SNAKE_CASE : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(__UpperCamelCase: torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : List[str] = prepped_outputs for _ in range(__UpperCamelCase ): # Chunk the input if not low_mem: SCREAMING_SNAKE_CASE : List[str] = _select_chunk else: SCREAMING_SNAKE_CASE : Union[str, Any] = partial( _chunk_slice ,flat_start=__UpperCamelCase ,flat_end=min(__UpperCamelCase ,i + chunk_size ) ,no_batch_dims=len(__UpperCamelCase ) ,) SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(__UpperCamelCase ,__UpperCamelCase ) # Run the layer on the chunk SCREAMING_SNAKE_CASE : int = layer(**__UpperCamelCase ) # Allocate space for the output if out is None: SCREAMING_SNAKE_CASE : List[Any] = tensor_tree_map(lambda __UpperCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) ,__UpperCamelCase ) # Put the chunk in its pre-allocated space if isinstance(__UpperCamelCase ,__UpperCamelCase ): def assign(__UpperCamelCase: dict ,__UpperCamelCase: dict ) -> None: for k, v in da.items(): if 
isinstance(__UpperCamelCase ,__UpperCamelCase ): assign(__UpperCamelCase ,da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: SCREAMING_SNAKE_CASE : Union[str, Any] = da[k] assign(__UpperCamelCase ,__UpperCamelCase ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): for xa, xa in zip(__UpperCamelCase ,__UpperCamelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: SCREAMING_SNAKE_CASE : List[str] = xa elif isinstance(__UpperCamelCase ,torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: SCREAMING_SNAKE_CASE : Optional[int] = output_chunk else: raise ValueError('Not supported' ) i += chunk_size SCREAMING_SNAKE_CASE : Any = tensor_tree_map(lambda __UpperCamelCase : t.view(orig_batch_dims + t.shape[1:] ) ,__UpperCamelCase ) return out class _a : '''simple docstring''' def __init__( self, A = 512, ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = max_chunk_size SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Optional[tuple] = None def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' logging.info('Tuning chunk size...' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size SCREAMING_SNAKE_CASE : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2 ) ) + 1 )] SCREAMING_SNAKE_CASE : List[str] = [c for c in candidates if c > min_chunk_size] SCREAMING_SNAKE_CASE : Optional[int] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(A ) -> bool: try: with torch.no_grad(): fn(*A, chunk_size=A ) return True except RuntimeError: return False SCREAMING_SNAKE_CASE : Any = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = len(A ) - 1 while i > min_viable_chunk_size_index: SCREAMING_SNAKE_CASE : Any = test_chunk_size(candidates[i] ) if not viable: SCREAMING_SNAKE_CASE : List[Any] = (min_viable_chunk_size_index + i) // 2 else: SCREAMING_SNAKE_CASE : Optional[Any] = i SCREAMING_SNAKE_CASE : List[Any] = (i + len(A ) - 1) // 2 return candidates[min_viable_chunk_size_index] def UpperCamelCase_ ( self, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = True for aa, aa in zip(A, A ): assert type(A ) == type(A ) if isinstance(A, (list, tuple) ): consistent &= self._compare_arg_caches(A, A ) elif isinstance(A, A ): SCREAMING_SNAKE_CASE : Optional[Any] = [v for _, v in sorted(aa.items(), key=lambda A : x[0] )] SCREAMING_SNAKE_CASE : Optional[int] = [v for _, v in sorted(aa.items(), key=lambda A : x[0] )] consistent &= self._compare_arg_caches(A, A ) else: consistent &= aa == aa return consistent def UpperCamelCase_ ( self, A, A, A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : tuple = tree_map(lambda A : a.shape if isinstance(A, torch.Tensor ) else a, A, A ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(A ) SCREAMING_SNAKE_CASE : str = self._compare_arg_caches(self.cached_arg_data, A ) else: # Otherwise, we can reuse the precomputed value SCREAMING_SNAKE_CASE : Union[str, Any] = False if not consistent: SCREAMING_SNAKE_CASE : Any = self._determine_favorable_chunk_size( A, A, A, ) SCREAMING_SNAKE_CASE : Dict = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
251
1
'''simple docstring''' import os from math import logaa def UpperCAmelCase__ ( UpperCAmelCase__ = "base_exp.txt" ) -> int: A_ = 0 A_ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(UpperCAmelCase__ ), UpperCAmelCase__ ) ) ): A_ , A_ = list(map(UpperCAmelCase__, line.split(""",""" ) ) ) if x * logaa(UpperCAmelCase__ ) > largest: A_ = x * logaa(UpperCAmelCase__ ) A_ = i + 1 return result if __name__ == "__main__": print(solution())
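This sample reads base/exponent pairs from base_exp.txt and returns the 1-based line of the largest power; since a**b is astronomically large, it compares b * log10(a) instead of the powers themselves. A tiny illustration of the same trick, using the two pairs quoted in the Project Euler 99 statement:

from math import log10

pairs = [(519432, 525806), (632382, 518061)]  # (base, exponent)
# a**b > c**d  iff  b * log10(a) > d * log10(c), so the huge powers never materialize
best = max(range(len(pairs)), key=lambda i: pairs[i][1] * log10(pairs[i][0]))
print(best + 1)  # prints 2: 632382**518061 is the larger power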
101
'''simple docstring''' import requests __lowerCamelCase = '''''' # <-- Put your OpenWeatherMap appid here! __lowerCamelCase = '''https://api.openweathermap.org/data/2.5/''' def UpperCAmelCase__ ( UpperCAmelCase__ = "Chicago", UpperCAmelCase__ = APPID ) -> dict: return requests.get(URL_BASE + """weather""", params=locals() ).json() def UpperCAmelCase__ ( UpperCAmelCase__ = "Kolkata, India", UpperCAmelCase__ = APPID ) -> dict: return requests.get(URL_BASE + """forecast""", params=locals() ).json() def UpperCAmelCase__ ( UpperCAmelCase__ = 55.68, UpperCAmelCase__ = 12.57, UpperCAmelCase__ = APPID ) -> dict: return requests.get(URL_BASE + """onecall""", params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: __lowerCamelCase = input('''Enter a location:''').strip() if location: pprint(current_weather(location)) else: break
101
1
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ) -> Any: UpperCamelCase : Optional[int] = old_name if "patch_embed" in old_name: UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = old_name.split('.' ) if layer == "0": UpperCamelCase : Optional[Any] = old_name.replace('0' , 'convolution1' ) elif layer == "1": UpperCamelCase : Optional[Any] = old_name.replace('1' , 'batchnorm_before' ) elif layer == "3": UpperCamelCase : Union[str, Any] = old_name.replace('3' , 'convolution2' ) else: UpperCamelCase : Any = old_name.replace('4' , 'batchnorm_after' ) if "network" in old_name and re.search(R'\d\.\d' , snake_case__ ): UpperCamelCase : str = R'\b\d{2}\b' if bool(re.search(snake_case__ , snake_case__ ) ): UpperCamelCase : Dict = re.search(R'\d\.\d\d.' , snake_case__ ).group() else: UpperCamelCase : int = re.search(R'\d\.\d.' , snake_case__ ).group() if int(match[0] ) < 6: UpperCamelCase : Union[str, Any] = old_name.replace(snake_case__ , '' ) UpperCamelCase : Dict = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] ) UpperCamelCase : Optional[Any] = 'intermediate_stages.' + trimmed_name else: UpperCamelCase : Any = old_name.replace(snake_case__ , '' ) if int(match[2] ) < num_meta4D_last_stage: UpperCamelCase : str = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] ) else: UpperCamelCase : str = str(int(match[2] ) - num_meta4D_last_stage ) UpperCamelCase : Tuple = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index ) if "norm1" in old_name: UpperCamelCase : int = trimmed_name.replace('norm1' , 'layernorm1' ) elif "norm2" in old_name: UpperCamelCase : Any = trimmed_name.replace('norm2' , 'layernorm2' ) elif "fc1" in old_name: UpperCamelCase : Union[str, Any] = trimmed_name.replace('fc1' , 'linear_in' ) elif "fc2" in old_name: UpperCamelCase : Dict = trimmed_name.replace('fc2' , 'linear_out' ) UpperCamelCase : Union[str, Any] = 'last_stage.' + trimmed_name elif "network" in old_name and re.search(R'.\d.' , snake_case__ ): UpperCamelCase : Dict = old_name.replace('network' , 'intermediate_stages' ) if "fc" in new_name: UpperCamelCase : Dict = new_name.replace('fc' , 'convolution' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): UpperCamelCase : Dict = new_name.replace('norm1' , 'batchnorm_before' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): UpperCamelCase : List[str] = new_name.replace('norm2' , 'batchnorm_after' ) if "proj" in new_name: UpperCamelCase : List[Any] = new_name.replace('proj' , 'projection' ) if "dist_head" in new_name: UpperCamelCase : Tuple = new_name.replace('dist_head' , 'distillation_classifier' ) elif "head" in new_name: UpperCamelCase : Union[str, Any] = new_name.replace('head' , 'classifier' ) elif "patch_embed" in new_name: UpperCamelCase : Optional[int] = 'efficientformer.' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": UpperCamelCase : List[str] = new_name.replace('norm' , 'layernorm' ) UpperCamelCase : Any = 'efficientformer.' 
+ new_name else: UpperCamelCase : Optional[Any] = 'efficientformer.encoder.' + new_name return new_name def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] ) -> Optional[int]: for key in checkpoint.copy().keys(): UpperCamelCase : int = checkpoint.pop(snake_case__ ) UpperCamelCase : List[str] = val return checkpoint def UpperCamelCase ( ) -> Dict: UpperCamelCase : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase : str = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return image def UpperCamelCase ( snake_case__ : Path , snake_case__ : Path , snake_case__ : Path , snake_case__ : bool ) -> Union[str, Any]: UpperCamelCase : List[str] = torch.load(snake_case__ , map_location='cpu' )['model'] UpperCamelCase : str = EfficientFormerConfig.from_json_file(snake_case__ ) UpperCamelCase : Union[str, Any] = EfficientFormerForImageClassificationWithTeacher(snake_case__ ) UpperCamelCase : Optional[Any] = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] ) UpperCamelCase : Optional[Any] = config.depths[-1] - config.num_metaad_blocks + 1 UpperCamelCase : str = convert_torch_checkpoint(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() UpperCamelCase : Any = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } # prepare image UpperCamelCase : Optional[int] = prepare_img() UpperCamelCase : Tuple = 256 UpperCamelCase : int = 224 UpperCamelCase : Tuple = EfficientFormerImageProcessor( size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , ) UpperCamelCase : int = processor(images=snake_case__ , return_tensors='pt' ).pixel_values # original processing pipeline UpperCamelCase : int = Compose( [ Resize(snake_case__ , interpolation=pillow_resamplings['bicubic'] ), CenterCrop(snake_case__ ), ToTensor(), Normalize(snake_case__ , snake_case__ ), ] ) UpperCamelCase : Union[str, Any] = image_transforms(snake_case__ ).unsqueeze(0 ) assert torch.allclose(snake_case__ , snake_case__ ) UpperCamelCase : str = model(snake_case__ ) UpperCamelCase : List[str] = outputs.logits UpperCamelCase : List[Any] = (1, 1000) if "l1" in model_name: UpperCamelCase : int = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , snake_case__ , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: UpperCamelCase : str = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , snake_case__ , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: UpperCamelCase : List[str] = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(snake_case__ ) print(F"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print('Pushing model to the hub...' 
) model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=snake_case__ , ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=snake_case__ , ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to EfficientFormer pytorch checkpoint.''', ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for EfficientFormer model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) parser.set_defaults(push_to_hub=True) __UpperCAmelCase = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
119
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> int: # Initialise PyTorch model UpperCamelCase : Any = MobileBertConfig.from_json_file(snake_case__ ) print(F"""Building PyTorch model from configuration: {config}""" ) UpperCamelCase : Tuple = MobileBertForPreTraining(snake_case__ ) # Load weights from tf checkpoint UpperCamelCase : int = load_tf_weights_in_mobilebert(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , snake_case__ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--mobilebert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained MobileBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __UpperCAmelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
119
1
from math import pow, sqrt def __lowerCAmelCase ( *a__ ) -> bool: __a = len(a__ ) > 0 and all(value > 0.0 for value in values ) return result def __lowerCAmelCase ( a__ , a__ ) -> float | ValueError: return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ ) else ValueError('''Input Error: Molar mass values must be greater than 0.''' ) ) def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError: return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) ) def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError: return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) ) def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError: return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) ) def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError: return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(a__ , a__ , a__ ) else ValueError( '''Input Error: Molar mass and effusion rate values must be greater than 0.''' ) )
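The sample implements Graham's law of effusion, rate1 / rate2 = sqrt(M2 / M1), plus its rearrangements for solving each unknown. A quick worked check of the core relation; the molar masses are approximate textbook values, not taken from the sample:

from math import sqrt

molar_mass_h2 = 2.016   # g/mol, approximate
molar_mass_o2 = 31.998  # g/mol, approximate
# rate_H2 / rate_O2 = sqrt(M_O2 / M_H2): lighter gases effuse faster
print(round(sqrt(molar_mass_o2 / molar_mass_h2), 2))  # ~3.98, i.e. H2 effuses about 4x faster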
33
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A : Optional[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
33
1
'''simple docstring''' import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def __UpperCamelCase ( lowercase__ : List[Any] ): '''simple docstring''' monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings', set() ) @pytest.fixture def __UpperCamelCase ( lowercase__ : Union[str, Any] ): '''simple docstring''' class lowerCAmelCase : def __init__( self : Any , __lowercase : Optional[Any] ): """simple docstring""" __lowercase =metric_id class lowerCAmelCase : lowerCAmelCase_ = [MetricMock(A ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def snake_case ( self : str ): """simple docstring""" return self._metrics monkeypatch.setattr('datasets.inspect.huggingface_hub', HfhMock() ) @pytest.mark.parametrize( 'func, args', [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] ) def __UpperCamelCase ( lowercase__ : Optional[int], lowercase__ : Tuple, lowercase__ : Optional[Any], lowercase__ : str, lowercase__ : Tuple ): '''simple docstring''' if "tmp_path" in args: __lowercase =tuple(arg if arg != 'tmp_path' else tmp_path for arg in args ) with pytest.warns(lowercase__, match='https://huggingface.co/docs/evaluate' ): func(*lowercase__ )
141
'''simple docstring''' def __UpperCamelCase ( lowercase__ : list, lowercase__ : list, lowercase__ : int ): '''simple docstring''' __lowercase =len(lowercase__ ) __lowercase =[[0] * n for i in range(lowercase__ )] for i in range(lowercase__ ): __lowercase =y_points[i] for i in range(2, lowercase__ ): for j in range(lowercase__, lowercase__ ): __lowercase =( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
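The routine above is Neville's iterated interpolation: the table entry q[j][i] holds the value at the query point of the polynomial through points j-i through j, and the bottom-right entry is the interpolated value. A cleaner sketch with conventional names (mine, not the sample's) and 0-based table columns:

def neville(x_pts: list[float], y_pts: list[float], x0: float) -> float:
    n = len(x_pts)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_pts[i]
    for i in range(1, n):
        for j in range(i, n):
            # polynomial through points j-i .. j, evaluated at x0
            q[j][i] = (
                (x0 - x_pts[j - i]) * q[j][i - 1]
                - (x0 - x_pts[j]) * q[j - 1][i - 1]
            ) / (x_pts[j] - x_pts[j - i])
    return q[n - 1][n - 1]

print(neville([1, 2, 3], [1, 4, 9], 2.5))  # 6.25, since the points lie on y = x**2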
141
1
from __future__ import annotations class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _snake_case , _snake_case = text, pattern _snake_case , _snake_case = len(lowerCAmelCase_ ), len(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCAmelCase ( self ) -> list[int]: # searches pattern in text and returns index positions _snake_case = [] for i in range(self.textLen - self.patLen + 1 ): _snake_case = self.mismatch_in_text(lowerCAmelCase_ ) if mismatch_index == -1: positions.append(lowerCAmelCase_ ) else: _snake_case = self.match_in_pattern(self.text[mismatch_index] ) _snake_case = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions UpperCAmelCase_ = """ABAABA""" UpperCAmelCase_ = """AB""" UpperCAmelCase_ = BoyerMooreSearch(text, pattern) UpperCAmelCase_ = bms.bad_character_heuristic() if len(positions) == 0: print("""No match found""") else: print("""Pattern found in following positions: """) print(positions)
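The class above implements only the bad-character heuristic of Boyer-Moore: on a mismatch, shift by the mismatch position minus the last occurrence of the offending character in the pattern. A standalone sketch of the same rule as a true sliding search; this is a variant of my own, since the sample instead scans every start position:

def bad_character_search(text: str, pattern: str) -> list[int]:
    positions = []
    n, m = len(text), len(pattern)
    i = 0
    while i <= n - m:
        for j in range(m - 1, -1, -1):
            if text[i + j] != pattern[j]:
                # align the pattern's last occurrence of the bad character with it
                last = pattern.rfind(text[i + j])  # -1 if absent
                i += max(1, j - last)
                break
        else:
            positions.append(i)  # full match starting at i
            i += 1
    return positions

print(bad_character_search("ABAABA", "AB"))  # [0, 3]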
295
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> bool: '''simple docstring''' # 1. Validate that the current and next vertices are adjacent if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool: '''simple docstring''' if curr_ind == len(UpperCamelCase__ ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(UpperCamelCase__ ) ): if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # Insert current vertex into path as next transition _snake_case = next_ver # Validate created path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ): return True # Backtrack _snake_case = -1 return False def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int = 0 ) -> list[int]: '''simple docstring''' _snake_case = [-1] * (len(UpperCamelCase__ ) + 1) # initialize start and end of path with starting index _snake_case = _snake_case = start_index # evaluate: return the path if a Hamiltonian cycle exists, otherwise an empty list return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
295
1
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata lowercase__ ="" if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'): class UpperCamelCase__ ( tr.AbstractTransform ): def __init__(self : Union[str, Any] , snake_case_ : str = " " ): __a : Any = sentence_delimiter def lowerCAmelCase (self : Dict , snake_case_ : str ): return list(lowerCamelCase__ ) def lowerCAmelCase (self : int , snake_case_ : List[str] ): __a : Tuple = [] for sent_idx, sentence in enumerate(lowerCamelCase__ ): chars.extend(self.process_string(lowerCamelCase__ ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase__ ) - 1: chars.append(self.sentence_delimiter ) return chars lowercase__ =tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: lowercase__ =tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) lowercase__ ="\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" lowercase__ ="\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n" lowercase__ ="\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): def lowerCAmelCase (self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def lowerCAmelCase (self : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[int]=False ): if concatenate_texts: return jiwer.compute_measures( lowerCamelCase__ , lowerCamelCase__ , truth_transform=lowerCamelCase__ , hypothesis_transform=lowerCamelCase__ , )["wer"] __a : Dict = 0 __a : Dict = 0 for prediction, reference in zip(lowerCamelCase__ , lowerCamelCase__ ): __a : int = jiwer.compute_measures( lowerCamelCase__ , lowerCamelCase__ , truth_transform=lowerCamelCase__ , hypothesis_transform=lowerCamelCase__ , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
216
def _a ( SCREAMING_SNAKE_CASE : int = 1000000 ): """simple docstring""" UpperCamelCase__ : Any = set(range(3 , SCREAMING_SNAKE_CASE , 2 ) ) primes.add(2 ) for p in range(3 , SCREAMING_SNAKE_CASE , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ) UpperCamelCase__ : Union[str, Any] = [float(SCREAMING_SNAKE_CASE ) for n in range(limit + 1 )] for p in primes: for n in range(SCREAMING_SNAKE_CASE , limit + 1 , SCREAMING_SNAKE_CASE ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f"{solution() = }")
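The sample sieves the totient function over floats via phi[n] *= 1 - 1/p and sums phi(2..limit), which counts the reduced proper fractions with denominator up to the limit (Project Euler 72). The same sieve in exact integer arithmetic, as a sketch:

def totient_sum(limit: int) -> int:
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p untouched so far, hence prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p  # multiply by (1 - 1/p) exactly
    return sum(phi[2:])

print(totient_sum(8))  # 21 reduced proper fractions with denominator <= 8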
146
0
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _lowerCamelCase : int = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : '''simple docstring''' _UpperCAmelCase : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _UpperCAmelCase : Optional[str] = field( default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _UpperCAmelCase : bool = field(default=UpperCAmelCase ,metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,) @dataclass class SCREAMING_SNAKE_CASE__ : '''simple docstring''' _UpperCAmelCase : str = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,) _UpperCAmelCase : int = field( default=1_2_8 ,metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } ,) _UpperCAmelCase : bool = field( default=UpperCAmelCase ,metadata={"help": "Overwrite the cached training and evaluation sets"} ) def a_ ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) _snake_case = import_module('tasks' ) try: _snake_case = getattr(__lowercase , model_args.task_type ) _snake_case = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __lowercase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _snake_case = token_classification_task.get_labels(data_args.labels ) _snake_case = dict(enumerate(__lowercase ) ) _snake_case = len(__lowercase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowercase , idalabel=__lowercase , labelaid={label: i for i, label in enumerate(__lowercase )} , cache_dir=model_args.cache_dir , ) _snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) _snake_case = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , ) # Get datasets _snake_case = ( TokenClassificationDataset( token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _snake_case = ( TokenClassificationDataset( token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(__lowercase : np.ndarray , __lowercase : np.ndarray ) -> Tuple[List[int], List[int]]: _snake_case = np.argmax(__lowercase , axis=2 ) _snake_case , _snake_case = preds.shape _snake_case = [[] for _ in range(__lowercase )] _snake_case = [[] for _ in range(__lowercase )] for i in range(__lowercase ): for j in range(__lowercase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(__lowercase : EvalPrediction ) -> Dict: _snake_case , 
_snake_case = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(__lowercase , __lowercase ), "precision": precision_score(__lowercase , __lowercase ), "recall": recall_score(__lowercase , __lowercase ), "f1": fa_score(__lowercase , __lowercase ), } # Data collator _snake_case = DataCollatorWithPadding(__lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _snake_case = Trainer( model=__lowercase , args=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , compute_metrics=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _snake_case = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _snake_case = trainer.evaluate() _snake_case = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(__lowercase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , __lowercase , __lowercase ) writer.write('%s = %s\n' % (key, value) ) results.update(__lowercase ) # Predict if training_args.do_predict: _snake_case = TokenClassificationDataset( token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) _snake_case , _snake_case , _snake_case = trainer.predict(__lowercase ) _snake_case , _snake_case = align_predictions(__lowercase , __lowercase ) _snake_case = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(__lowercase , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , __lowercase , __lowercase ) writer.write('%s = %s\n' % (key, value) ) # Save predictions _snake_case = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(__lowercase , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(__lowercase , __lowercase , __lowercase ) return results def a_ ( __lowercase : Optional[Any] ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
130
def a_ ( __lowercase : int = 50_000_000 ) -> int: _snake_case = set() _snake_case = int((limit - 24) ** (1 / 2) ) _snake_case = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , __lowercase ) ) ) for primea in primes: _snake_case = primea * primea for primea in primes: _snake_case = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: _snake_case = primea * primea * primea * primea _snake_case = square + cube + tetr if total >= limit: break ret.add(__lowercase ) return len(__lowercase ) if __name__ == "__main__": print(F'{solution() = }')
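This sample counts the numbers below fifty million expressible as a prime square plus a prime cube plus a prime fourth power (Project Euler 87), collecting hits in a set so each number is counted once. A small-limit sketch of the same idea, with names of my own:

def prime_power_triples(limit: int) -> int:
    bound = int(limit ** 0.5) + 1  # p**2 < limit forces p < sqrt(limit)
    primes = [p for p in range(2, bound)
              if all(p % d for d in range(2, int(p ** 0.5) + 1))]
    hits = set()
    for r in primes:                      # fourth powers
        if r ** 4 >= limit:
            break
        for q in primes:                  # cubes
            if r ** 4 + q ** 3 >= limit:
                break
            for p in primes:              # squares
                n = p * p + q ** 3 + r ** 4
                if n >= limit:
                    break
                hits.add(n)
    return len(hits)

print(prime_power_triples(50))  # 4: the numbers 28, 33, 47 and 49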
130
1
from __future__ import annotations lowercase__ :str = "#" class lowercase : def __init__( self): lowercase = {} def A__ ( self ,A__): lowercase = self._trie for char in text: if char not in trie: lowercase = {} lowercase = trie[char] lowercase = True def A__ ( self ,A__): lowercase = self._trie for char in prefix: if char in trie: lowercase = trie[char] else: return [] return self._elements(A__) def A__ ( self ,A__): lowercase = [] for c, v in d.items(): lowercase = [''' '''] if c == END else [(c + s) for s in self._elements(A__)] result.extend(A__) return tuple(A__) lowercase__ :Union[str, Any] = Trie() lowercase__ :int = ("depart", "detergent", "daring", "dog", "deer", "deal") for word in words: trie.insert_word(word) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = trie.find_word(lowerCAmelCase__ ) return tuple(string + word for word in suffixes ) def UpperCamelCase ( ): '''simple docstring''' print(autocomplete_using_trie('''de''' ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
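The snippet above builds a character trie with "#" as the end-of-word marker and reconstructs completions by walking the subtree under a prefix. A compact dictionary-based sketch of the same structure, with descriptive names of my own:

class Trie:
    def __init__(self) -> None:
        self.root: dict = {}

    def insert(self, word: str) -> None:
        node = self.root
        for ch in word:
            node = node.setdefault(ch, {})
        node["#"] = {}  # end-of-word marker

    def complete(self, prefix: str) -> list[str]:
        node = self.root
        for ch in prefix:
            if ch not in node:
                return []
            node = node[ch]
        words: list[str] = []

        def walk(n: dict, acc: str) -> None:
            for ch, child in n.items():
                if ch == "#":
                    words.append(prefix + acc)
                else:
                    walk(child, acc + ch)

        walk(node, "")
        return words

t = Trie()
for w in ("depart", "detergent", "deal"):
    t.insert(w)
print(sorted(t.complete("de")))  # ['deal', 'depart', 'detergent']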
101
def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''Length must be a positive number.''' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''Length must be a positive number.''' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
101
1
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class UpperCamelCase__:
    _SCREAMING_SNAKE_CASE : CommonSchedulerState
    # setable values
    _SCREAMING_SNAKE_CASE : jnp.ndarray
    _SCREAMING_SNAKE_CASE : jnp.ndarray
    _SCREAMING_SNAKE_CASE : Optional[int] = None

    @classmethod
    def lowerCAmelCase(cls: int, snake_case_: CommonSchedulerState, snake_case_: jnp.ndarray, snake_case_: jnp.ndarray):
        return cls(common=snake_case_, init_noise_sigma=snake_case_, timesteps=snake_case_)


@dataclass
class UpperCamelCase__(__lowercase):
    _SCREAMING_SNAKE_CASE : DDPMSchedulerState


class UpperCamelCase__(__lowercase, __lowercase):
    _SCREAMING_SNAKE_CASE : str = [e.name for e in FlaxKarrasDiffusionSchedulers]
    _SCREAMING_SNAKE_CASE : jnp.dtype

    @property
    def lowerCAmelCase(self: Optional[Any]):
        return True

    @register_to_config
    def __init__(
        self: Any,
        snake_case_: int = 1_0_0_0,
        snake_case_: float = 0.0001,
        snake_case_: float = 0.02,
        snake_case_: str = "linear",
        snake_case_: Optional[jnp.ndarray] = None,
        snake_case_: str = "fixed_small",
        snake_case_: bool = True,
        snake_case_: str = "epsilon",
        snake_case_: jnp.dtype = jnp.floataa,
    ):
        __a : str = dtype

    def lowerCAmelCase(self: Any, snake_case_: Optional[CommonSchedulerState] = None):
        if common is None:
            __a : Optional[Any] = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        __a : int = jnp.array(1.0, dtype=self.dtype)

        __a : List[Any] = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=snake_case_,
            init_noise_sigma=snake_case_,
            timesteps=snake_case_,
        )

    def lowerCAmelCase(self: Dict, snake_case_: DDPMSchedulerState, snake_case_: jnp.ndarray, snake_case_: Optional[int] = None):
        return sample

    def lowerCAmelCase(self: List[Any], snake_case_: DDPMSchedulerState, snake_case_: int, snake_case_: Tuple = ()):
        __a : Tuple = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        __a : Any = (jnp.arange(0, snake_case_) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=snake_case_,
            timesteps=snake_case_,
        )

    def lowerCAmelCase(self: List[Any], snake_case_: DDPMSchedulerState, snake_case_: Optional[Any], snake_case_: List[str] = None, snake_case_: Union[str, Any] = None):
        __a : Optional[Any] = state.common.alphas_cumprod[t]
        __a : List[str] = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        __a : Optional[int] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            __a : List[str] = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            __a : Optional[Any] = jnp.clip(snake_case_, a_min=1E-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            __a : int = jnp.log(jnp.clip(snake_case_, a_min=1E-20))
        elif variance_type == "fixed_large":
            __a : List[str] = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            __a : Union[str, Any] = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            __a : Any = variance
            __a : Dict = state.common.betas[t]
            __a : Any = (predicted_variance + 1) / 2
            __a : Optional[Any] = frac * max_log + (1 - frac) * min_log

        return variance

    def lowerCAmelCase(
        self: Any,
        snake_case_: DDPMSchedulerState,
        snake_case_: jnp.ndarray,
        snake_case_: int,
        snake_case_: jnp.ndarray,
        snake_case_: Optional[jax.random.KeyArray] = None,
        snake_case_: bool = True,
    ):
        __a : int = timestep

        if key is None:
            __a : Any = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            __a : List[str] = jnp.split(snake_case_, sample.shape[1], axis=1)
        else:
            __a : int = None

        # 1. compute alphas, betas
        __a : Optional[int] = state.common.alphas_cumprod[t]
        __a : Tuple = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        __a : Optional[int] = 1 - alpha_prod_t
        __a : Union[str, Any] = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            __a : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            __a : Union[str, Any] = model_output
        elif self.config.prediction_type == "v_prediction":
            __a : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                ''' for the FlaxDDPMScheduler.'''
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            __a : Dict = jnp.clip(snake_case_, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __a : str = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        __a : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __a : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            __a : Optional[int] = jax.random.split(snake_case_, num=1)
            __a : Union[str, Any] = jax.random.normal(snake_case_, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(snake_case_, snake_case_, predicted_variance=snake_case_) ** 0.5) * noise

        __a : Dict = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        __a : Tuple = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=snake_case_, state=snake_case_)

    def lowerCAmelCase(self: List[str], snake_case_: DDPMSchedulerState, snake_case_: jnp.ndarray, snake_case_: jnp.ndarray, snake_case_: jnp.ndarray):
        return add_noise_common(state.common, snake_case_, snake_case_, snake_case_)

    def lowerCAmelCase(self: str, snake_case_: DDPMSchedulerState, snake_case_: jnp.ndarray, snake_case_: jnp.ndarray, snake_case_: jnp.ndarray):
        return get_velocity_common(state.common, snake_case_, snake_case_, snake_case_)

    def __len__(self: List[str]):
        return self.config.num_train_timesteps
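# Hedged usage sketch for the scheduler above. The corpus renames the class and
# method definitions to placeholders; this example assumes the upstream
# diffusers names (FlaxDDPMScheduler, create_state, set_timesteps, step), which
# the file's own internal references (DDPMSchedulerState, FlaxDDPMSchedulerOutput,
# _get_variance) suggest but do not fully confirm, so it is left commented out.
#
# import jax
# import jax.numpy as jnp
# from diffusers import FlaxDDPMScheduler
#
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# sample = jnp.zeros((1, 3, 32, 32))  # stand-in for the current noisy latents
# key = jax.random.PRNGKey(0)
# for t in state.timesteps:
#     model_output = sample  # a real loop would call a UNet here instead
#     # with return_dict=False, step returns (pred_prev_sample, state)
#     sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)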
363
def naive_pattern_search(s: str, pattern: str) -> list:
    # slide the pattern over the text and compare character by character;
    # worst-case cost is O(len(s) * len(pattern))
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
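# Optional cross-check of the naive scan above using Python's built-in
# str.find; both return the starting index of every (possibly overlapping)
# occurrence. find_all is a helper introduced here for illustration only.
def find_all(s: str, pattern: str) -> list:
    positions = []
    start = s.find(pattern)
    while start != -1:
        positions.append(start)
        start = s.find(pattern, start + 1)
    return positions


if __name__ == "__main__":
    text = 'ABAAABCDBBABCDDEBCABC'
    assert find_all(text, 'ABC') == naive_pattern_search(text, 'ABC')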
90
0
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def lowercase ( __snake_case : List[Any] ): lowercase_ : int = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase_ : Any = json.loads(open(__snake_case ).read() ) if not params: raise ValueError( F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): lowercase_ : Dict = args.output + '''.pt''' lowercase_ : Any = OrderedDict() with tf.device('''/CPU:0''' ): lowercase_ : int = tf.train.load_checkpoint(args.tf_model_dir ) lowercase_ : Optional[int] = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase_ : int = reader.get_tensor(__snake_case ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase_ : Optional[Any] = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase_ : Tuple = 8 lowercase_ : str = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase_ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[Any] = torch.tensor(__snake_case ) elif key_name.startswith('''model/moe''' ): lowercase_ : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase_ : List[str] = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Tuple = torch.tensor(__snake_case ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase_ : List[str] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[str] = torch.tensor(__snake_case ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase_ : Union[str, Any] = key_name[-9:-7] for i in range(1_6 ): lowercase_ : Tuple = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase_ : str = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase_ : Tuple = torch.tensor(__snake_case ) elif key_name.startswith('''model/mlp''' ): lowercase_ : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase_ : List[Any] = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Dict = torch.tensor(__snake_case ) elif key_name.endswith('''/p1/bias''' ): lowercase_ : Any = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase_ : Any = vnp.copy() # same because it is one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.endswith('''/p2/kernel''' ): lowercase_ : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase_ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : int = torch.tensor(__snake_case ) elif key_name.endswith('''/p2/bias''' ): lowercase_ : Optional[int] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase_ : List[str] = vnp.copy() # same because it is 
one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.startswith('''model/ln''' ): lowercase_ : Optional[Any] = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase_ : str = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase_ : Any = vnp.copy() # same because it is one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.endswith('''/g''' ): lowercase_ : Union[str, Any] = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.startswith('''model/att''' ): lowercase_ : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase_ : Dict = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase_ : Tuple = state[:, 0, :, :] lowercase_ : Dict = state[:, 1, :, :] lowercase_ : Union[str, Any] = state[:, 2, :, :] lowercase_ : int = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Optional[Any] = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Union[str, Any] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[str] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase_ : str = torch.tensor(__snake_case ) lowercase_ : str = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase_ : Any = torch.tensor(__snake_case ) lowercase_ : List[Any] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase_ : Any = torch.tensor(__snake_case ) elif key_name.endswith('''/o/kernel''' ): lowercase_ : Dict = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase_ : Optional[int] = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Any = torch.tensor(__snake_case ) elif key_name.startswith('''model/an''' ): lowercase_ : str = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase_ : Dict = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional lowercase_ : str = torch.tensor(__snake_case ) elif key_name.endswith('''/g''' ): lowercase_ : str = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase_ : Optional[Any] = vnp.copy() # same because it is one dimensional lowercase_ : Any = torch.tensor(__snake_case ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase_ : int = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase_ : str = '''model.%s.weight''' % nlayer lowercase_ : int = vnp.copy() # same in embedded lowercase_ : int = torch.tensor(__snake_case ) if key_name.startswith('''model/wte''' ): lowercase_ : Dict = '''lm_head.weight''' lowercase_ : Tuple = vnp.copy() # same in embedded lowercase_ : Dict = torch.tensor(__snake_case ) elif key_name.startswith('''model/wob''' ): lowercase_ : int = '''final_logits_bias''' lowercase_ : Any = vnp.copy() # same in 
embedded lowercase_ : Optional[Any] = state.reshape((1, -1) ) lowercase_ : Any = torch.tensor(__snake_case ) elif key_name == "model/dense/kernel": lowercase_ : List[str] = '''model.last_project.weight''' lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Optional[Any] = torch.tensor(__snake_case ) elif key_name == "model/dense_1/bias": lowercase_ : Dict = '''model.last_project.bias''' lowercase_ : Tuple = vnp.copy() # same because it is one dimensional lowercase_ : Any = torch.tensor(__snake_case ) torch.save(__snake_case , args.output ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') __A : Any = parser.parse_args() convert_tf_gptsan_to_pt(args)
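# Hypothetical invocation of the converter above (the flags are the ones its
# argparse block defines; the script filename and paths are placeholders):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./tf_checkpoint_dir --output ./gptsan.pt
#
# The script writes an OrderedDict of tensors via torch.save, so the result can
# be inspected with torch.load:
#
# import torch
# state_dict = torch.load("./gptsan.pt")  # placeholder path
# print(list(state_dict.keys())[:5])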
33
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : def __init__( self : int , A : Tuple , A : int=3 , A : List[str]=32 , A : Dict=3 , A : Any=10 , A : Dict=[10, 20, 30, 40] , A : Optional[Any]=[1, 1, 2, 1] , A : Union[str, Any]=True , A : Optional[Any]=True , A : Any="relu" , A : Optional[Any]=3 , A : Tuple=None , ) -> Dict: lowercase_ : str = parent lowercase_ : List[Any] = batch_size lowercase_ : Optional[int] = image_size lowercase_ : int = num_channels lowercase_ : int = embeddings_size lowercase_ : str = hidden_sizes lowercase_ : List[str] = depths lowercase_ : Dict = is_training lowercase_ : int = use_labels lowercase_ : Any = hidden_act lowercase_ : List[Any] = num_labels lowercase_ : Tuple = scope lowercase_ : Optional[Any] = len(A ) def A ( self : str ) -> Tuple: lowercase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Union[str, Any] = None if self.use_labels: lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : Optional[int] = self.get_config() return config, pixel_values, labels def A ( self : Dict ) -> int: return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A ( self : str , A : Tuple , A : str , A : str ) -> str: lowercase_ : str = TFResNetModel(config=A ) lowercase_ : Union[str, Any] = model(A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : Any , A : int , A : List[Any] , A : Optional[Any] ) -> Optional[Any]: lowercase_ : Tuple = self.num_labels lowercase_ : Union[str, Any] = TFResNetForImageClassification(A ) lowercase_ : Tuple = model(A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] ) -> Tuple: lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Dict = config_and_inputs lowercase_ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _UpperCAmelCase ( _A , _A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : List[Any] = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : str = False 
SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : Any = False def A ( self : Union[str, Any] ) -> List[Any]: lowercase_ : int = TFResNetModelTester(self ) lowercase_ : str = ConfigTester(self , config_class=A , has_text_modality=A ) def A ( self : Dict ) -> Optional[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : Dict ) -> List[Any]: return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def A ( self : Any ) -> Any: pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def A ( self : List[str] ) -> Optional[Any]: pass def A ( self : str ) -> Tuple: lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : int = model_class(A ) lowercase_ : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : str = [*signature.parameters.keys()] lowercase_ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A ) def A ( self : List[str] ) -> Tuple: lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def A ( self : List[Any] ) -> List[str]: def check_hidden_states_output(A : Union[str, Any] , A : int , A : List[Any] ): lowercase_ : int = model_class(A ) lowercase_ : Optional[Any] = model(**self._prepare_for_class(A , A ) ) lowercase_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : Any = self.model_tester.num_stages self.assertEqual(len(A ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Union[str, Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase_ : List[str] = layer_type lowercase_ : Tuple = True check_hidden_states_output(A , A , A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[Any] = True check_hidden_states_output(A , A , A ) def A ( self : Optional[int] ) -> Tuple: lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) @slow def A ( self : List[str] ) -> Optional[int]: for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Tuple = TFResNetModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase ( ): lowercase_ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def A ( self : Any ) -> Optional[int]: return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A ( self : Any ) -> 
Optional[int]: lowercase_ : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase_ : List[Any] = self.default_image_processor lowercase_ : Dict = prepare_img() lowercase_ : List[str] = image_processor(images=A , return_tensors='''tf''' ) # forward pass lowercase_ : Tuple = model(**A ) # verify the logits lowercase_ : Optional[int] = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A ) lowercase_ : Optional[Any] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A , atol=1e-4 ) )
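# The suite above follows the standard transformers test layout, so a
# hypothetical targeted run (the file path is an assumption about where this
# module would live in the repository) could look like:
#
#   python -m pytest tests/models/resnet/test_modeling_tf_resnet.py -k "hidden_states" -v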
33
1
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class SCREAMING_SNAKE_CASE__:
    """simple docstring"""

    def __init__(
        self: str,
        __A: Optional[int],
        __A: Union[str, Any] = 3,
        __A: Dict = 3_2,
        __A: List[str] = 3,
        __A: Optional[int] = 1_0,
        __A: Any = [8, 1_6, 3_2, 6_4],
        __A: Optional[Any] = [1, 1, 2, 1],
        __A: Dict = True,
        __A: Optional[Any] = True,
        __A: Any = "relu",
        __A: Dict = 3,
        __A: Dict = None,
        __A: int = ["stage2", "stage3", "stage4"],
        __A: List[Any] = [2, 3, 4],
        __A: Tuple = 1,
    ):
        snake_case__ : List[Any] = parent
        snake_case__ : Optional[Any] = batch_size
        snake_case__ : List[str] = image_size
        snake_case__ : Optional[int] = num_channels
        snake_case__ : int = embeddings_size
        snake_case__ : Union[str, Any] = hidden_sizes
        snake_case__ : Optional[int] = depths
        snake_case__ : Union[str, Any] = is_training
        snake_case__ : List[str] = use_labels
        snake_case__ : Tuple = hidden_act
        snake_case__ : Dict = num_labels
        snake_case__ : Any = scope
        snake_case__ : Any = len(__A)
        snake_case__ : List[str] = out_features
        snake_case__ : Optional[int] = out_indices
        snake_case__ : str = num_groups

    def _lowercase(self: List[str]):
        snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        snake_case__ : Union[str, Any] = None
        if self.use_labels:
            snake_case__ : List[Any] = ids_tensor([self.batch_size], self.num_labels)
        snake_case__ : List[Any] = self.get_config()
        return config, pixel_values, labels

    def _lowercase(self: str):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def _lowercase(self: Dict, __A: Tuple, __A: int, __A: Tuple):
        snake_case__ : List[Any] = BitModel(config=__A)
        model.to(__A)
        model.eval()
        snake_case__ : List[Any] = model(__A)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),
        )

    def _lowercase(self: Dict, __A: int, __A: Dict, __A: Tuple):
        snake_case__ : Optional[Any] = self.num_labels
        snake_case__ : List[str] = BitForImageClassification(__A)
        model.to(__A)
        model.eval()
        snake_case__ : Union[str, Any] = model(__A, labels=__A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def _lowercase(self: int, __A: int, __A: List[Any], __A: Optional[Any]):
        snake_case__ : Optional[Any] = BitBackbone(config=__A)
        model.to(__A)
        model.eval()
        snake_case__ : Optional[Any] = model(__A)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        snake_case__ : Union[str, Any] = None
        snake_case__ : List[Any] = BitBackbone(config=__A)
        model.to(__A)
        model.eval()
        snake_case__ : Any = model(__A)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def _lowercase(self: Tuple):
        snake_case__ : int = self.prepare_config_and_inputs()
        snake_case__, snake_case__, snake_case__ : Dict = config_and_inputs
        snake_case__ : List[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SCREAMING_SNAKE_CASE__(UpperCamelCase_, UpperCamelCase_, unittest.TestCase):
    """simple docstring"""

    a_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    a_ = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    a_ = False
    a_ = False
    a_ = False
    a_ = False
    a_ = False

    def _lowercase(self: int):
        snake_case__ : List[Any] = BitModelTester(self)
        snake_case__ : int = ConfigTester(self, config_class=__A, has_text_modality=__A)

    def _lowercase(self: int):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowercase(self: int):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def _lowercase(self: Optional[int]):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def _lowercase(self: Dict):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def _lowercase(self: str):
        pass

    def _lowercase(self: Optional[Any]):
        snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ : Optional[Any] = model_class(__A)
            snake_case__ : Any = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ : Optional[int] = [*signature.parameters.keys()]
            snake_case__ : Union[str, Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1], __A)

    def _lowercase(self: int):
        snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A)

    def _lowercase(self: Optional[int]):
        snake_case__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__A)

    def _lowercase(self: Any):
        snake_case__, snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ : str = model_class(config=__A)
            for name, module in model.named_modules():
                if isinstance(__A, (nn.BatchNormad, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    def _lowercase(self: Any):
        def check_hidden_states_output(__A: Dict, __A: List[Any], __A: List[Any]):
            snake_case__ : Any = model_class(__A)
            model.to(__A)
            model.eval()
            with torch.no_grad():
                snake_case__ : Dict = model(**self._prepare_for_class(__A, __A))
            snake_case__ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            snake_case__ : int = self.model_tester.num_stages
            self.assertEqual(len(__A), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        snake_case__, snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : List[str] = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                snake_case__ : int = layer_type
                snake_case__ : Tuple = True
                check_hidden_states_output(__A, __A, __A)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                snake_case__ : Optional[int] = True
                check_hidden_states_output(__A, __A, __A)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def _lowercase(self: Optional[int]):
        pass

    def _lowercase(self: Union[str, Any]):
        snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__A)

    @slow
    def _lowercase(self: int):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ : str = BitModel.from_pretrained(__A)
            self.assertIsNotNone(__A)


def SCREAMING_SNAKE_CASE():
    snake_case__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def _lowercase(self: Tuple):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def _lowercase(self: List[str]):
        snake_case__ : Any = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__A)
        snake_case__ : int = self.default_image_processor
        snake_case__ : int = prepare_img()
        snake_case__ : Optional[int] = image_processor(images=__A, return_tensors="pt").to(__A)
        # forward pass
        with torch.no_grad():
            snake_case__ : Dict = model(**__A)
        # verify the logits
        snake_case__ : Optional[int] = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, __A)
        snake_case__ : Dict = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__A)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1e-4))


@require_torch
class SCREAMING_SNAKE_CASE__(UpperCamelCase_, unittest.TestCase):
    """simple docstring"""

    a_ = (BitBackbone,) if is_torch_available() else ()
    a_ = BitConfig
    a_ = False

    def _lowercase(self: Optional[Any]):
        snake_case__ : int = BitModelTester(self)
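# Minimal sketch of what create_and_check_backbone exercises, using the public
# transformers API directly. The config values mirror the tester defaults above;
# the printed channel list follows from hidden_sizes and out_features, but treat
# it as illustrative rather than authoritative. Left commented out since it
# downloads nothing but does require torch and transformers at runtime.
#
# import torch
# from transformers import BitBackbone, BitConfig
#
# config = BitConfig(
#     hidden_sizes=[8, 16, 32, 64],
#     depths=[1, 1, 2, 1],
#     out_features=["stage2", "stage3", "stage4"],
# )
# model = BitBackbone(config).eval()
# with torch.no_grad():
#     outputs = model(torch.randn(1, 3, 32, 32))
# print(len(outputs.feature_maps), model.channels)  # expect 3 maps, channels [16, 32, 64]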
286
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self: List[str],
        __A: int,
        __A: str = 7,
        __A: Union[str, Any] = 3,
        __A: Union[str, Any] = 3_0,
        __A: Optional[int] = 4_0_0,
        __A: Optional[Any] = True,
        __A: Optional[int] = None,
        __A: Union[str, Any] = True,
        __A: Optional[int] = [0.5, 0.5, 0.5],
        __A: Any = [0.5, 0.5, 0.5],
        __A: Optional[int] = True,
        __A: Optional[Any] = 1 / 2_5_5,
        __A: Union[str, Any] = True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        snake_case__ : Optional[Any] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        snake_case__ : List[Any] = parent
        snake_case__ : Union[str, Any] = batch_size
        snake_case__ : Tuple = num_channels
        snake_case__ : List[Any] = min_resolution
        snake_case__ : Optional[Any] = max_resolution
        snake_case__ : str = do_resize
        snake_case__ : List[str] = size
        snake_case__ : List[Any] = do_normalize
        snake_case__ : Dict = image_mean
        snake_case__ : List[Any] = image_std
        snake_case__ : int = do_rescale
        snake_case__ : Tuple = rescale_factor
        snake_case__ : str = do_pad

    def _lowercase(self: List[str]):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def _lowercase(self: Optional[Any], __A: Dict, __A: Union[str, Any] = False):
        if not batched:
            snake_case__ : List[str] = image_inputs[0]
            if isinstance(__A, Image.Image):
                snake_case__, snake_case__ : Any = image.size
            else:
                snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
            if w < h:
                snake_case__ : List[str] = int(self.size["shortest_edge"] * h / w)
                snake_case__ : Tuple = self.size["shortest_edge"]
            elif w > h:
                snake_case__ : Optional[int] = self.size["shortest_edge"]
                snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h)
            else:
                snake_case__ : Optional[Any] = self.size["shortest_edge"]
                snake_case__ : List[Any] = self.size["shortest_edge"]
        else:
            snake_case__ : Union[str, Any] = []
            for image in image_inputs:
                snake_case__, snake_case__ : int = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            snake_case__ : Any = max(__A, key=lambda __A: item[0])[0]
            snake_case__ : Optional[int] = max(__A, key=lambda __A: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__(UpperCamelCase_, unittest.TestCase):
    """simple docstring"""

    a_ = DeformableDetrImageProcessor if is_vision_available() else None

    def _lowercase(self: Optional[int]):
        snake_case__ : str = DeformableDetrImageProcessingTester(self)

    @property
    def _lowercase(self: List[Any]):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase(self: Union[str, Any]):
        snake_case__ : int = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__A, "image_mean"))
        self.assertTrue(hasattr(__A, "image_std"))
        self.assertTrue(hasattr(__A, "do_normalize"))
        self.assertTrue(hasattr(__A, "do_resize"))
        self.assertTrue(hasattr(__A, "do_rescale"))
        self.assertTrue(hasattr(__A, "do_pad"))
        self.assertTrue(hasattr(__A, "size"))

    def _lowercase(self: Tuple):
        snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 1_8, "longest_edge": 1_3_3_3})
        self.assertEqual(image_processor.do_pad, __A)
        snake_case__ : List[Any] = self.image_processing_class.from_dict(
            self.image_processor_dict, size=4_2, max_size=8_4, pad_and_return_pixel_mask=__A
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 4_2, "longest_edge": 8_4})
        self.assertEqual(image_processor.do_pad, __A)

    def _lowercase(self: Any):
        pass

    def _lowercase(self: Optional[int]):
        # Initialize image_processing
        snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A)
        for image in image_inputs:
            self.assertIsInstance(__A, Image.Image)

        # Test not batched input
        snake_case__ : Tuple = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        snake_case__, snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__A, batched=__A)
        snake_case__ : Union[str, Any] = image_processing(__A, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def _lowercase(self: Any):
        # Initialize image_processing
        snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A, numpify=__A)
        for image in image_inputs:
            self.assertIsInstance(__A, np.ndarray)

        # Test not batched input
        snake_case__ : Union[str, Any] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        snake_case__ : Union[str, Any] = image_processing(__A, return_tensors="pt").pixel_values
        snake_case__, snake_case__ : Any = self.image_processor_tester.get_expected_values(__A, batched=__A)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def _lowercase(self: Union[str, Any]):
        # Initialize image_processing
        snake_case__ : str = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A, torchify=__A)
        for image in image_inputs:
            self.assertIsInstance(__A, torch.Tensor)

        # Test not batched input
        snake_case__ : Tuple = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        snake_case__ : Optional[Any] = image_processing(__A, return_tensors="pt").pixel_values
        snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A, batched=__A)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def _lowercase(self: Optional[int]):
        # prepare image and target
        snake_case__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            snake_case__ : Any = json.loads(f.read())
        snake_case__ : List[str] = {"image_id": 3_9_7_6_9, "annotations": target}

        # encode them
        snake_case__ : Optional[Any] = DeformableDetrImageProcessor()
        snake_case__ : Tuple = image_processing(images=__A, annotations=__A, return_tensors="pt")

        # verify pixel values
        snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding["pixel_values"].shape, __A)
        snake_case__ : int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __A, atol=1e-4))

        # verify area
        snake_case__ : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __A))

        # verify boxes
        snake_case__ : str = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, __A)
        snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __A, atol=1e-3))

        # verify image_id
        snake_case__ : int = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __A))

        # verify is_crowd
        snake_case__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __A))

        # verify class_labels
        snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __A))

        # verify orig_size
        snake_case__ : Union[str, Any] = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __A))

        # verify size
        snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __A))

    @slow
    def _lowercase(self: Union[str, Any]):
        # prepare image, target and masks_path
        snake_case__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            snake_case__ : Optional[int] = json.loads(f.read())
        snake_case__ : Any = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
        snake_case__ : List[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        snake_case__ : Dict = DeformableDetrImageProcessor(format="coco_panoptic")
        snake_case__ : List[Any] = image_processing(images=__A, annotations=__A, masks_path=__A, return_tensors="pt")

        # verify pixel values
        snake_case__ : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding["pixel_values"].shape, __A)
        snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __A, atol=1e-4))

        # verify area
        snake_case__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __A))

        # verify boxes
        snake_case__ : Union[str, Any] = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, __A)
        snake_case__ : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __A, atol=1e-3))

        # verify image_id
        snake_case__ : Union[str, Any] = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __A))

        # verify is_crowd
        snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __A))

        # verify class_labels
        snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __A))

        # verify masks
        snake_case__ : Optional[Any] = 8_2_2_8_7_3
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), __A)

        # verify orig_size
        snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __A))

        # verify size
        snake_case__ : Dict = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __A))
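# Worked example of the shortest-edge resize rule that get_expected_values
# implements above, for the tester's defaults (shortest_edge=18). For a 30x400
# image (h=30, w=400, so w > h), the height is pinned to 18 and the width is
# scaled to int(18 * 400 / 30) = 240 to preserve the aspect ratio; the
# longest_edge=1333 cap never triggers at these resolutions. expected_resize is
# a standalone helper introduced here for illustration only.
def expected_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert expected_resize(30, 400) == (18, 240)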
286
1