| code (stringlengths 81–54k) | code_codestyle (int64 0–721) | style_context (stringlengths 91–41.9k) | style_context_codestyle (int64 0–699) | label (int64 0–1) |
|---|---|---|---|---|
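The header above is the dataset schema: each row pairs a `code` string with a `style_context` string, two style-cluster ids, and a binary `label`. A minimal sketch of reading such a dataset with the `datasets` library; the Hub id is a hypothetical placeholder, and the meaning of `label` (presumably whether the two snippets share a style) is an assumption:

```python
from datasets import load_dataset

# Hypothetical Hub id -- substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]))                # snippet text, 81 to 54k chars
print(row["code_codestyle"])           # style-cluster id, 0-721
print(len(row["style_context"]))       # context snippet, 91 to 41.9k chars
print(row["style_context_codestyle"])  # style-cluster id, 0-699
print(row["label"])                    # binary label, 0 or 1
```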
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = tempfile.mkdtemp()
__lowercase : Optional[int] = 5
# Realm tok
__lowercase : Optional[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase : Union[str, Any] = os.path.join(self.tmpdirname , """realm_tokenizer""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
__lowercase : List[Any] = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__lowercase : Union[str, Any] = os.path.join(self.tmpdirname , """realm_block_records""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
def lowerCAmelCase ( self : List[str] ) -> RealmTokenizer:
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[int] = RealmConfig(num_block_records=self.num_block_records )
return config
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : str = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
] , dtype=__lowerCAmelCase , )
return block_records
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.get_config()
__lowercase : str = self.get_dummy_retriever()
__lowercase : Optional[Any] = retriever.tokenizer
__lowercase : Dict = np.array([0, 3] , dtype="""long""" )
__lowercase : Tuple = tokenizer(["""Test question"""] ).input_ids
__lowercase : int = tokenizer(
["""the fourth"""] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
__lowercase : Union[str, Any] = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase : int = retriever(
__lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors="""np""" )
self.assertEqual(len(__lowerCAmelCase ) , 2 )
self.assertEqual(len(__lowerCAmelCase ) , 2 )
self.assertEqual(len(__lowerCAmelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase : Dict = self.get_config()
__lowercase : Union[str, Any] = self.get_dummy_retriever()
__lowercase : Optional[Any] = retriever.tokenizer
__lowercase : Tuple = np.array([0, 3, 5] , dtype="""long""" )
__lowercase : int = tokenizer(["""Test question"""] ).input_ids
__lowercase : str = tokenizer(
["""the fourth""", """longer longer"""] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
__lowercase : Optional[int] = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase : Union[str, Any] = retriever(
__lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors="""np""" )
self.assertEqual([False, True, True] , __lowerCAmelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __lowerCAmelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __lowerCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
# Test local path
__lowercase : Tuple = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
__lowercase : List[str] = os.path.join(
os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
__lowercase : str = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
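A sketch of the call pattern the tests above exercise; the unpacked names are assumptions read off the assertions (whether each block contains the answer, plus answer start/end token positions), not a documented return signature:

```python
# Hypothetical names, inferred from the assertions above.
has_answers, start_pos, end_pos, concat_inputs = retriever(
    retrieved_block_ids,               # np.array of block indices, dtype "long"
    question_input_ids,                # tokenized question
    answer_ids=answer_ids,             # tokenized answer to locate in each block
    max_length=config.reader_seq_len,
    return_tensors="np",
)
# concat_inputs packs [CLS] question [SEP] block [SEP] rows, one per block id.
```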
| 710
|
def or_gate(input_1: int, input_2: int) -> int:
    """OR gate: return 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)

def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1

if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 649
| 0
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = """ylacombe/bark-small"""
__lowercase : Optional[Any] = tempfile.mkdtemp()
__lowercase : Optional[Any] = """en_speaker_1"""
__lowercase : List[Any] = """This is a test string"""
__lowercase : Optional[Any] = """speaker_embeddings_path.json"""
__lowercase : str = """speaker_embeddings"""
def lowerCAmelCase ( self : Union[str, Any] , **__a : int ) -> Union[str, Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Optional[Any] = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
__lowercase : Dict = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowercase : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowercase : Dict = 35
__lowercase : Optional[Any] = 2
__lowercase : int = 8
__lowercase : Any = {
"""semantic_prompt""": np.ones(UpperCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowercase : List[str] = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
__lowercase : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowercase : Tuple = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
__lowercase : Any = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
__lowercase : List[str] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowercase : Any = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase : str = self.get_tokenizer()
__lowercase : Union[str, Any] = BarkProcessor(tokenizer=UpperCAmelCase_ )
__lowercase : Any = processor(text=self.input_string )
__lowercase : Optional[Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 711
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
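The `_import_structure` dict above drives `_LazyModule`: submodules are only imported when one of their exported names is first accessed. A minimal sketch of that mechanism under simplified assumptions (the real `_LazyModule` in `transformers.utils` also handles `TYPE_CHECKING`, `__dir__`, and module specs):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Defer submodule imports until an exported name is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        submodule = self._symbol_to_module[attr]
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, attr)
```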
| 649
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
__lowercase : Union[str, Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=_lowercase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=_lowercase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=_lowercase )
return parser.parse_args()
def main():
__lowercase : Optional[int] = parse_args()
# Import training_script as a module.
__lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__lowercase : Dict = script_fpath.stem
__lowercase : List[str] = importlib.import_module(_lowercase )
# Patch sys.argv
__lowercase : Union[str, Any] = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 712
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
lowerCamelCase : Any = None
def snake_case_ ( ):
global _logger
__lowercase : List[str] = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
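The lock counter documented in the base class's `__init__` makes acquisition reentrant within a process: only the 0 → 1 transition touches the OS-level lock, and only the transition back to 0 releases it. A minimal usage sketch, assuming the platform-appropriate `FileLock` alias selected just above:

```python
lock = FileLock("shared_resource.txt.lock", timeout=5)

with lock:       # counter 0 -> 1: OS-level lock acquired
    with lock:   # counter 1 -> 2: no second OS-level acquire
        pass     # exiting: counter 2 -> 1, lock still held
# exiting the outer block: counter 1 -> 0, OS-level lock released
```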
| 649
| 0
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    # Squared Euclidean norm: vector . vector
    return np.dot(vector, vector)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : str , *,
__a : Optional[int] = np.inf , __a : str = "linear" , __a : List[str] = 0.0 , ) -> None:
"""simple docstring"""
__lowercase : int = regularization
__lowercase : Dict = gamma
if kernel == "linear":
__lowercase : Union[str, Any] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
__lowercase : Dict = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
__lowercase : Any = F"Unknown kernel: {kernel}"
raise ValueError(_SCREAMING_SNAKE_CASE )
def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
    """Linear kernel: the plain dot product."""
    return np.dot(vector1, vector2)
def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
    """RBF kernel: exp(-gamma * ||vector1 - vector2||^2)."""
    return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : Union[str, Any] ) -> None:
"""simple docstring"""
__lowercase : str = observations
__lowercase : Tuple = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((__lowercase) , ) : Union[str, Any] = np.shape(_SCREAMING_SNAKE_CASE )
def to_minimize(__a : List[Any] ) -> float:
__lowercase : Optional[int] = 0
((__lowercase) , ) : str = np.shape(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_SCREAMING_SNAKE_CASE )
__lowercase : int = LinearConstraint(_SCREAMING_SNAKE_CASE , 0 , 0 )
__lowercase : Dict = Bounds(0 , self.regularization )
__lowercase : Tuple = minimize(
_SCREAMING_SNAKE_CASE , np.ones(_SCREAMING_SNAKE_CASE ) , bounds=_SCREAMING_SNAKE_CASE , constraints=[ly_contraint] ).x
__lowercase : Union[str, Any] = l_star
# calculating mean offset of separation plane to points
__lowercase : str = 0
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
__lowercase : str = s / n
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase : List[str] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _SCREAMING_SNAKE_CASE )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
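Written out, the Wolfe dual sketched in the comments of the fit method above is:

$$
\max_{\lambda}\ \sum_n \lambda_n \;-\; \frac{1}{2}\sum_n\sum_m \lambda_n \lambda_m\, y_n y_m\, K(x_n, x_m)
\qquad \text{s.t. } 0 \le \lambda_n \le C,\quad \sum_n \lambda_n y_n = 0,
$$

with \( w = \sum_n \lambda_n y_n x_n \) and \( b \approx \operatorname{mean}_n\big(y_n - w \cdot x_n\big) \). The code minimizes the negated objective (`to_minimize`) under `Bounds(0, C)` and a `LinearConstraint` that encodes \( \sum_n \lambda_n y_n = 0 \).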
| 713
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
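The comments above lean on `compute_effective_axis_dimension` to replace a dynamic axis (-1) with a small fixed size so ONNX export does not specialize on it. A sketch of that helper's behavior, written from the comments rather than the library source, so treat the exact signature as an assumption:

```python
def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis is flagged with a non-positive value; swap in the fixed
    # default (2 for batch, 8 for sequence) so ONNX tracing sees a concrete size.
    if dimension <= 0:
        dimension = fixed_dimension
    # Leave room for the special tokens the tokenizer will add on top.
    return dimension - num_token_to_add
```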
| 649
| 0
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase : Dict = get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Optional[str] = None ) -> Any:
"""simple docstring"""
__lowercase : Any = (
os.path.join(__a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowercase : Dict = Extractor
def lowerCAmelCase ( self : int , __a : str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__lowercase : Union[str, Any] = os.path.abspath(__a )
return os.path.join(self.extract_dir , hash_url_to_filename(__a ) )
def lowerCAmelCase ( self : List[Any] , __a : str , __a : bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(__a ) and not (os.path.isdir(__a ) and os.listdir(__a ))
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = self.extractor.infer_extractor_format(__a )
if not extractor_format:
return input_path
__lowercase : int = self._get_output_path(__a )
if self._do_extract(__a , __a ):
self.extractor.extract(__a , __a , __a )
return output_path
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
@abstractmethod
def lowerCAmelCase ( cls : Any , __a : Union[Path, str] , **__a : List[Any] ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
...
class lowerCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
_A : List[bytes] = []
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : int ) -> Dict:
"""simple docstring"""
with open(__a , """rb""" ) as f:
return f.read(__a )
@classmethod
def lowerCAmelCase ( cls : Tuple , __a : Union[Path, str] , __a : bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
__lowercase : str = max(len(__a ) for cls_magic_number in cls.magic_numbers )
try:
__lowercase : Any = cls.read_magic_number(__a , __a )
except OSError:
return False
return any(magic_number.startswith(__a ) for cls_magic_number in cls.magic_numbers )
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
def lowerCAmelCase ( cls : str , __a : Union[Path, str] , **__a : Optional[Any] ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(__a )
@staticmethod
def lowerCAmelCase ( __a : Optional[Any] , __a : int ) -> Union[str, Any]:
"""simple docstring"""
def resolved(__a : str ) -> str:
return os.path.realpath(os.path.abspath(__a ) )
def badpath(__a : str , __a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__a , __a ) ).startswith(__a )
def badlink(__a : List[Any] , __a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowercase : str = resolved(os.path.join(__a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__a )
__lowercase : Dict = resolved(__a )
for finfo in members:
if badpath(finfo.name , __a ):
logger.error(F"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(__a , __a ):
logger.error(F"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(__a , __a ):
logger.error(F"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(__a , exist_ok=__a )
__lowercase : Optional[int] = tarfile.open(__a )
tar_file.extractall(__a , members=TarExtractor.safemembers(__a , __a ) )
tar_file.close()
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : str = [B'\x1F\x8B']
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(__a , """rb""" ) as gzip_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : Dict = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def lowerCAmelCase ( cls : List[Any] , __a : Union[Path, str] , __a : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(__a , magic_number=__a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__a , """rb""" ) as fp:
__lowercase : int = _EndRecData(__a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowercase : List[Any] = fp.read(__a ) # CD is where we expect it to be
if len(__a ) == sizeCentralDir:
__lowercase : Tuple = struct.unpack(__a , __a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(__a , exist_ok=__a )
with zipfile.ZipFile(__a , """r""" ) as zip_file:
zip_file.extractall(__a )
zip_file.close()
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : List[Any] = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(__a ) as compressed_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : List[str] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__a , exist_ok=__a )
__lowercase : Dict = rarfile.RarFile(__a )
rf.extractall(__a )
rf.close()
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : List[str] = [B'\x28\xb5\x2F\xFD']
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__lowercase : int = zstd.ZstdDecompressor()
with open(__a , """rb""" ) as ifh, open(__a , """wb""" ) as ofh:
dctx.copy_stream(__a , __a )
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : int = [B'\x42\x5A\x68']
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
with bz2.open(__a , """rb""" ) as compressed_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : int = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(__a , exist_ok=__a )
with py7zr.SevenZipFile(__a , """r""" ) as archive:
archive.extractall(__a )
class lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
_A : Optional[int] = [B'\x04\x22\x4D\x18']
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(__a , """rb""" ) as compressed_file:
with open(__a , """wb""" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class lowerCAmelCase :
'''simple docstring'''
_A : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCAmelCase ( cls : List[Any] ) -> Optional[int]:
"""simple docstring"""
return max(
len(__a )
for extractor in cls.extractors.values()
if issubclass(__a , __a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCAmelCase ( __a : Union[Path, str] , __a : int ) -> Tuple:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(__a , magic_number_length=__a )
except OSError:
return b""
@classmethod
def lowerCAmelCase ( cls : str , __a : Union[Path, str] , __a : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__a , )
__lowercase : Union[str, Any] = cls.infer_extractor_format(__a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCAmelCase ( cls : Any , __a : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
__lowercase : Dict = cls._get_magic_number_max_length()
__lowercase : Optional[Any] = cls._read_magic_number(__a , __a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__a , magic_number=__a ):
return extractor_format
@classmethod
def lowerCAmelCase ( cls : List[Any] , __a : Union[Path, str] , __a : Union[Path, str] , __a : Optional[str] = None , __a : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(__a ) , exist_ok=__a )
# Prevent parallel extractions
__lowercase : Dict = str(Path(__a ).with_suffix(""".lock""" ) )
with FileLock(__a ):
shutil.rmtree(__a , ignore_errors=__a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__a , __a ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__a , )
__lowercase : Any = extractor if extractor != """deprecated""" else extractor_format
else:
__lowercase : Any = cls.extractors[extractor_format]
return extractor.extract(__a , __a )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__a ):
return extractor.extract(__a , __a )
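A minimal usage sketch of the dispatch above: infer the archive format from its magic number, then extract with the matching extractor. `Extractor` is the registry class defined above (the name survives in the manager's assignment); the paths are placeholders:

```python
archive = "downloads/corpus.tar.gz"      # placeholder path
out_dir = "downloads/extracted/corpus"   # placeholder path

fmt = Extractor.infer_extractor_format(archive)  # e.g. "gzip", or None if unknown
if fmt:
    Extractor.extract(archive, out_dir, extractor_format=fmt)
```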
| 714
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
__lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
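A minimal end-to-end sketch of the pipeline above; the checkpoint name is a hypothetical choice for illustration (any conversational model works):

```python
from transformers import Conversation, pipeline

# Hypothetical checkpoint choice.
chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")

conversation = Conversation("Hi, what can you do?")
conversation = chatbot(conversation, minimum_tokens=10)
print(conversation.generated_responses[-1])
```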
| 649
| 0
|
lowerCamelCase : Optional[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCamelCase : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCamelCase : Tuple = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 715
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
    def test_attention_outputs( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        """simple docstring"""

        def check_hidden_states_output( inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_semantic_segmentation( self ):
        """simple docstring"""
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
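# Shape sanity note (added for clarity; the numbers come from the assertions above):
# google/deeplabv3_mobilenet_v2_1.0_513 predicts 21 PASCAL VOC classes on 513x513
# inputs, and with an effective output stride of 8 each spatial axis collapses to
# ceil(513 / 8) = 65 positions -- hence the expected logits shape (1, 21, 65, 65).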
| 649
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowerCamelCase : Any = get_tests_dir('''fixtures''')
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(A_ ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowercase : List[str] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
__lowercase : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(A_ )
@is_staging_test
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase ( cls : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = TOKEN
HfFolder.save_token(A_ )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = ViTImageProcessor.from_pretrained(A_ )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
__lowercase : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id="""test-image-processor""" , push_to_hub=A_ , use_auth_token=self._token )
__lowercase : Union[str, Any] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = ViTImageProcessor.from_pretrained(A_ )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
__lowercase : Optional[int] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A_ , use_auth_token=self._token )
__lowercase : Any = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
__lowercase : str = CustomImageProcessor.from_pretrained(A_ )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
__lowercase : Any = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor" , trust_remote_code=A_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
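# Note on the dynamic-class round trip above (illustrative summary, not extra test code):
# `register_for_auto_class()` makes `save_pretrained`/`push_to_hub` write an `auto_map`
# entry pointing at custom_image_processing.CustomImageProcessor, so a later
# `AutoImageProcessor.from_pretrained(repo , trust_remote_code=True )` downloads that
# module from the Hub repo and instantiates the custom class from it.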
| 716
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
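# Usage sketch (illustrative; assumes this module is exposed as `accelerate.utils.tqdm`):
# the wrapper keeps a single progress bar per node by disabling tqdm on every local
# process except rank 0. With this signature the iterable goes into *args:
#
#     from accelerate.utils import tqdm
#     for batch in tqdm(True , dataloader ):  # True = main_process_only
#         ...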
| 649
| 0
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase : int = logging.get_logger(__name__)
class lowerCAmelCase ( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ["""input_values""", """attention_mask"""]
    def __init__( self : str , feature_size : int = 1 , sampling_rate : int = 16000 , padding_value : float = 0.0 , do_normalize : bool = False , num_mel_bins : int = 80 , hop_length : int = 16 , win_length : int = 64 , win_function : str = "hann_window" , frame_signal_scale : float = 1.0 , fmin : float = 80 , fmax : float = 7600 , mel_floor : float = 1E-10 , reduction_factor : int = 2 , return_attention_mask : bool = True , **kwargs : Any , ) -> None:
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                """The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , FutureWarning , )
        if reduction_factor != 2.0:
            warnings.warn(
                """The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , FutureWarning , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values : List[np.ndarray] , attention_mask : List[np.ndarray] , padding_value : float = 0.0 ) -> List[np.ndarray]:
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def _extract_mel_features( self : Dict , one_waveform : np.ndarray , ) -> np.ndarray:
        """simple docstring"""
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="""log10""" , )
        return log_mel_spec.T
    def __call__( self : List[Any] , audio : Optional[Union[np.ndarray, List[float]]] = None , audio_target : Optional[Union[np.ndarray, List[float]]] = None , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , **kwargs : Any , ) -> BatchFeature:
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
        if audio is not None:
            inputs = self._process_audio(
                audio , False , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target , True , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
            if inputs is None:
                return inputs_target
            else:
                inputs["""labels"""] = inputs_target["""input_values"""]
                decoder_attention_mask = inputs_target.get("""attention_mask""" )
                if decoder_attention_mask is not None:
                    inputs["""decoder_attention_mask"""] = decoder_attention_mask
        return inputs
    def _process_audio( self : Any , speech : Union[np.ndarray, List[float], List[np.ndarray]] , is_target : bool = False , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Any , ) -> BatchFeature:
        """simple docstring"""
        is_batched_numpy = isinstance(speech , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(speech , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            speech = [np.asarray(speech , dtype=np.float32 ) for speech in speech]
        elif not is_batched and not isinstance(speech , np.ndarray ):
            speech = np.asarray(speech , dtype=np.float32 )
        elif isinstance(speech , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            speech = speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform ) for waveform in speech]
            encoded_inputs = BatchFeature({"""input_values""": features} )
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"""input_values""": speech} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["""input_values"""]
        if not isinstance(input_values[0] , np.ndarray ):
            padded_inputs["""input_values"""] = [np.asarray(array , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs["""input_values"""] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs["""input_values"""] = input_values.astype(np.float32 )
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["""input_values"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_values"""] , attention_mask=attention_mask , padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self : Optional[int] ) -> Dict[str, Any]:
        """simple docstring"""
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
        for name in names:
            if name in output:
                del output[name]
        return output
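# Usage sketch (illustrative; not part of the original module, and the 440 Hz tone is
# just an arbitrary example input): extracting padded log-mel targets with the
# extractor above, whose defaults give 80 mel bins.
#
#     import numpy as np
#     extractor = lowerCAmelCase()  # the feature extractor class defined above
#     wave = np.sin(2 * np.pi * 440 * np.arange(16000 ) / 16000 ).astype(np.float32 )
#     features = extractor(audio_target=wave , sampling_rate=16000 , return_tensors="""np""" )
#     print(features["""input_values"""].shape )  # (1, num_frames, 80)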
| 717
|
from __future__ import annotations
def snake_case_ ( nums : list[int] ):
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_excluding , max_including )
if __name__ == "__main__":
import doctest
doctest.testmod()
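    # Illustrative check (added; not in the original file): the best non-adjacent picks
    # in [3, 2, 7, 10] are 3 and 10, so the function should return 13.
    assert snake_case_([3, 2, 7, 10] ) == 13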
| 649
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'''
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
    def tearDown( self ):
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = """src/diffusers"""
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        """simple docstring"""
        code = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , """new_code.py""" )
        with open(fname , """w""" , newline="""\n""" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , """r""" ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ):
        """simple docstring"""
        code = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency( self ):
        """simple docstring"""
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
        # With no empty line at the end
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , F"{long_class_name}SchedulerOutput" , re.sub("""Bert""" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , REFERENCE_CODE , overwrite_result=re.sub("""DDPM""" , """Test""" , REFERENCE_CODE ) , )
| 718
|
__version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
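# Import-guard pattern note (added for clarity): each try/except block above probes an
# optional dependency and, when it is missing, star-imports dummy placeholder objects
# that raise an informative "please install X" error only when instantiated. Downstream
# code can therefore always write, e.g.:
#
#     from diffusers import StableDiffusionPipeline  # real class only when torch and
#                                                    # transformers are installed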
| 649
| 0
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
SAMPLE_BPE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
FRAMEWORK = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : int = """<pad>"""
__lowercase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCAmelCase_ ) , 1004 )
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowercase : List[Any] = """I was born in 92000, and this is falsé."""
__lowercase : List[str] = tokenizer.encode(lowerCAmelCase_ )
__lowercase : Dict = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : List[str] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowercase : List[Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowercase : Dict = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
__lowercase : Any = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase : Dict = self.get_tokenizer()
__lowercase : Union[str, Any] = self.get_rust_tokenizer()
__lowercase : Dict = """I was born in 92000, and this is falsé."""
__lowercase : Any = tokenizer.tokenize(lowerCAmelCase_ )
__lowercase : int = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : int = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowercase : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = self.get_rust_tokenizer()
__lowercase : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ )
__lowercase : str = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__lowercase : str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=lowerCAmelCase_ , )
| 719
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys ( config , has_lm_head=False , is_semantic=False ):
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , has_lm_head=False , is_semantic=False ):
    for i in range(config.num_hidden_layers ):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
        q_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[F"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
        gamma_2 = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
        state_dict[F"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[F"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint ( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
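# Example invocation (illustrative; the script filename is assumed, and the checkpoint
# URL is the parser's default above):
#
#     python convert_dit_checkpoint.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base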
| 649
| 0
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
def load_orig_config_file ( config_path ):
print("""Loading config file...""" )
def flatten_yaml_as_dict(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str]="" , lowerCAmelCase_ : List[str]="." ):
__lowercase : List[Any] = []
for k, v in d.items():
__lowercase : int = parent_key + sep + k if parent_key else k
if isinstance(UpperCamelCase__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCamelCase__ , UpperCamelCase__ , sep=UpperCamelCase__ ).items() )
else:
items.append((new_key, v) )
return dict(UpperCamelCase__ )
__lowercase : List[str] = argparse.Namespace()
with open(UpperCamelCase__ , """r""" ) as yaml_file:
try:
__lowercase : str = yaml.load(UpperCamelCase__ , Loader=yaml.FullLoader )
__lowercase : Optional[int] = flatten_yaml_as_dict(UpperCamelCase__ )
for k, v in flat_cfg.items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(UpperCamelCase__ , str(UpperCamelCase__ ) ) )
return config
def get_mobilevitva_config ( task_name , orig_cfg_file ):
__lowercase : Optional[int] = MobileViTVaConfig()
__lowercase : Union[str, Any] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
__lowercase : List[Any] = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
__lowercase : Any = 384
else:
__lowercase : str = 256
__lowercase : Tuple = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
__lowercase : Optional[Any] = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
__lowercase : Dict = 384
else:
__lowercase : Tuple = 256
__lowercase : Dict = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
__lowercase : List[str] = 151
__lowercase : Dict = 512
__lowercase : Optional[Any] = """ade20k-id2label.json"""
__lowercase : Tuple = True
elif task_name.startswith("""voc_""" ):
__lowercase : List[Any] = 21
__lowercase : Union[str, Any] = 512
__lowercase : str = """pascal-voc-id2label.json"""
__lowercase : str = True
# orig_config
__lowercase : Optional[Any] = load_orig_config_file(UpperCamelCase__ )
assert getattr(UpperCamelCase__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
__lowercase : str = getattr(UpperCamelCase__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(UpperCamelCase__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
__lowercase : str = getattr(UpperCamelCase__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
__lowercase : Tuple = getattr(UpperCamelCase__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
__lowercase : int = getattr(UpperCamelCase__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
__lowercase : Any = getattr(UpperCamelCase__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
__lowercase : str = getattr(UpperCamelCase__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
__lowercase : Optional[Any] = """huggingface/label-files"""
__lowercase : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Dict = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys ( state_dict , base_model=False ):
if base_model:
__lowercase : Any = """"""
else:
__lowercase : Dict = """mobilevitv2."""
__lowercase : List[str] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
__lowercase : List[Any] = k[8:]
else:
__lowercase : Optional[Any] = k
if ".block." in k:
__lowercase : Dict = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
__lowercase : int = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
__lowercase : Union[str, Any] = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
__lowercase : Optional[Any] = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
__lowercase : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
__lowercase : Optional[int] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
__lowercase : int = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
__lowercase : List[str] = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
__lowercase : Dict = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
__lowercase : List[str] = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
__lowercase : Optional[int] = [0, 1]
elif i == 4:
__lowercase : Dict = [0, 1, 2, 3]
elif i == 5:
__lowercase : Dict = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
__lowercase : int = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
__lowercase : Optional[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
__lowercase : Dict = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
__lowercase : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
__lowercase : Optional[Any] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
__lowercase : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
__lowercase : List[str] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
__lowercase : Optional[int] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
__lowercase : Optional[int] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
__lowercase : Dict = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
__lowercase : str = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
__lowercase : Any = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys ( state_dict ):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img ():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint ( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
__lowercase : List[str] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ).eval()
__lowercase : List[Any] = False
else:
__lowercase : int = MobileViTVaForImageClassification(UpperCamelCase__ ).eval()
__lowercase : Dict = False
# remove and rename some keys of load the original model
__lowercase : List[str] = checkpoint
remove_unused_keys(UpperCamelCase__ )
__lowercase : List[Any] = create_rename_keys(UpperCamelCase__ , base_model=UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load modified state_dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__lowercase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__lowercase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowercase : Tuple = model(**UpperCamelCase__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
__lowercase : Tuple = outputs.logits
__lowercase : int = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
__lowercase : Optional[Any] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '''
'''\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
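# NOTE: an illustrative invocation of the conversion entry point above; the
# script filename and all paths below are placeholders (assumptions), not
# part of the original sources:
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path /path/to/mobilevitv2-1.0.pt \
#       --orig_config_path /path/to/mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256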
| 720
|
from torch import nn
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__( self , class_size : int , embed_size : int ) -> None:
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )

    def forward( self , hidden_state ):
        """simple docstring"""
        logits = self.mlp(hidden_state )
        return logits
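# NOTE: a minimal usage sketch for the classification head defined above; the
# sizes (class_size=2, embed_size=768) are illustrative assumptions, not
# values taken from the original sources.
import torch

head_sketch = lowerCAmelCase(class_size=2 , embed_size=768 )
example_logits = head_sketch(torch.randn(4 , 768 ) )  # -> shape (4, 2)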
| 649
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Any = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class lowerCAmelCase ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """falcon"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("""n_embed""" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @property
    def head_dim( self ):
        """simple docstring"""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary( self ):
        """simple docstring"""
        return not self.alibi
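# NOTE: a minimal sketch instantiating the configuration class above with
# small illustrative sizes; these values are assumptions for demonstration,
# not the released Falcon-7B/40B hyperparameters.
falcon_config_sketch = lowerCAmelCase(vocab_size=1000 , hidden_size=64 , num_hidden_layers=2 , num_attention_heads=4 )
print(falcon_config_sketch.head_dim )  # 16, i.e. hidden_size // num_attention_heads
print(falcon_config_sketch.rotary )    # True, because `alibi` defaults to False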
| 721
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path : str , tgt_path : str , save_path : str = None , **kwargs ):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
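# NOTE: an illustrative command-line invocation of the scoring entry point
# above via python-fire; the script and file names are placeholders:
#
#   python rouge_cli.py hypotheses.txt references.txt --save_path rouge.json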
| 649
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_biogpt"""] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
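# NOTE: with the lazy-module pattern above, importing the package is cheap;
# the torch-backed classes are only resolved on first attribute access. A
# minimal sketch (assumes the usual top-level `transformers` package):
#
#   from transformers import BioGptConfig         # no torch import triggered
#   from transformers import BioGptForCausalLM    # torch-backed module loaded lazily here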
| 700
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( args : Dict ):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )


class DownloadCommand ( BaseTransformersCLICommand ):
    '''simple docstring'''

    @staticmethod
    def register_subcommand( parser : ArgumentParser ) -> None:
        """simple docstring"""
        download_parser = parser.add_parser("""download""" )
        download_parser.add_argument(
            """--cache-dir""" , type=str , default=None , help="""Path to location to store the models""" )
        download_parser.add_argument(
            """--force""" , action="""store_true""" , help="""Force the model to be downloaded even if already in cache-dir""" )
        download_parser.add_argument(
            """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
        download_parser.add_argument("""model""" , type=str , help="""Name of the model to download""" )
        download_parser.set_defaults(func=snake_case_ )

    def __init__( self , model : str , cache : str , force : bool , trust_remote_code : bool ) -> None:
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run( self ) -> None:
        """simple docstring"""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
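# NOTE: an illustrative shell invocation of the command registered above; the
# model name is just an example:
#
#   transformers-cli download --cache-dir ./hf-cache bert-base-uncased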
| 649
| 0
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase ( TestCasePlus ):
'''simple docstring'''
    def _create_dummy_data( self , data_dir : str ) -> None:
        """simple docstring"""
        os.makedirs(data_dir , exist_ok=True )
        contents = {"""source""": """What is love ?""", """target""": """life"""}
        n_lines = {"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = """\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , F"{split}.{field}" ) , """w""" ) as f:
                    f.write(content )
    def _run_finetune( self , gpus : int , distributed_retriever : str = "pytorch" ):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , """output""" )
        data_dir = os.path.join(tmp_dir , """data""" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = F"\n --data_dir {data_dir} --output_dir {output_dir} --model_name_or_path facebook/rag-sequence-base --model_type rag_sequence --do_train --do_predict --n_val -1 --val_check_interval 1.0 --train_batch_size 2 --eval_batch_size 1 --max_source_length 25 --max_target_length 25 --val_max_target_length 25 --test_max_target_length 25 --label_smoothing 0.1 --dropout 0.1 --attention_dropout 0.1 --weight_decay 0.001 --adam_epsilon 1e-08 --max_grad_norm 0.1 --lr_scheduler polynomial --learning_rate 3e-04 --num_train_epochs 1 --warmup_steps 4 --gradient_accumulation_steps 1 --distributed-port 8787 --use_dummy_dataset 1 --distributed_retriever {distributed_retriever} ".split()
        if gpus > 0:
            testargs.append(F"--gpus={gpus}" )
            if is_apex_available():
                testargs.append("""--fp16""" )
        else:
            testargs.append("""--gpus=0""" )
            testargs.append("""--distributed_backend=ddp_cpu""" )
            testargs.append("""--num_processes=2""" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , """metrics.json""" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
    @require_torch_gpu
    def test_finetune_gpu( self ) -> None:
        """simple docstring"""
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    def test_finetune_multigpu( self ) -> None:
        """simple docstring"""
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval( self ) -> None:
        """simple docstring"""
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval( self ) -> None:
        """simple docstring"""
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 701
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , """forward""" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
        seq_length = getattr(self.model_tester , """seq_length""" , None )
        decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , seq_length )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_length )
        d_model = getattr(self.model_tester , """d_model""" , None )
        num_attention_heads = getattr(self.model_tester , """num_attention_heads""" , None )
        dim = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename : str = """train-batch.pt""" ):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
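# NOTE: the tests above exercise `model.decomposition_layer`; the following is
# a self-contained sketch of the moving-average series decomposition idea that
# Autoformer relies on (series = seasonal + trend), not the library's exact
# implementation. The kernel size is an illustrative assumption.
import torch

def decompose(series: torch.Tensor, kernel_size: int = 25):
    """Return (seasonal, trend), where trend is a centered moving average."""
    # pad both ends so the averaged sequence keeps its original length
    pad = (kernel_size - 1) // 2
    front = series[:, :1, :].repeat(1, pad, 1)
    end = series[:, -1:, :].repeat(1, kernel_size - 1 - pad, 1)
    padded = torch.cat([front, series, end], dim=1)
    trend = padded.unfold(1, kernel_size, 1).mean(dim=-1)
    return series - trend, trend

x = torch.randn(2, 50, 8)  # (batch, time, features)
seasonal, trend = decompose(x)
assert seasonal.shape == trend.shape == x.shape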
| 649
| 0
|
def snake_case_ ( density : float , bulk_modulus : float ):
    """Speed of sound in a fluid from the Newton-Laplace equation: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
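# NOTE: a worked example of the Newton-Laplace relation implemented above,
# c = sqrt(bulk_modulus / density), using approximate room-temperature
# properties of water (an assumption for illustration): K ~ 2.15e9 Pa,
# rho ~ 998 kg/m^3. The result is roughly 1467.8 m/s.
print(round(snake_case_(998 , 2.15E9 ) , 1 ) )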
| 702
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ) -> None:
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device : Union[str, torch.device] = None ) -> None:
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
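# NOTE: a minimal usage sketch for the variance-preserving SDE scheduler above,
# to be read in the context of this module; shapes and the step count are
# illustrative assumptions, and the random score stands in for a score model.
#
#   scheduler = lowerCAmelCase()          # defaults from @register_to_config
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       score = torch.randn_like(sample)
#       sample, sample_mean = scheduler.step_pred(score, sample, t)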
| 649
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self : Any , __a : Union[str, Any] , __a : List[Any]=13 , __a : Any=7 , __a : Optional[int]=True , __a : Dict=True , __a : int=True , __a : Optional[Any]=True , __a : Any=True , __a : Union[str, Any]=False , __a : Union[str, Any]=False , __a : Optional[Any]=False , __a : List[str]=2 , __a : Union[str, Any]=99 , __a : Union[str, Any]=0 , __a : List[Any]=32 , __a : Any=5 , __a : Tuple=4 , __a : List[Any]=0.1 , __a : str=0.1 , __a : Optional[int]=512 , __a : str=12 , __a : str=2 , __a : Optional[Any]=0.02 , __a : Dict=3 , __a : List[Any]=4 , __a : List[Any]="last" , __a : Tuple=None , __a : List[str]=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parent
__lowercase : int = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : Tuple = is_training
__lowercase : int = use_input_lengths
__lowercase : List[str] = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : Union[str, Any] = gelu_activation
__lowercase : int = sinusoidal_embeddings
__lowercase : Union[str, Any] = causal
__lowercase : Any = asm
__lowercase : Union[str, Any] = n_langs
__lowercase : int = vocab_size
__lowercase : str = n_special
__lowercase : Optional[int] = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : Union[str, Any] = num_attention_heads
__lowercase : Tuple = hidden_dropout_prob
__lowercase : str = attention_probs_dropout_prob
__lowercase : Tuple = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : Dict = initializer_range
__lowercase : Optional[int] = num_labels
__lowercase : Tuple = num_choices
__lowercase : List[str] = summary_type
__lowercase : Union[str, Any] = use_proj
__lowercase : List[Any] = scope
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Union[str, Any] = None
if self.use_input_lengths:
__lowercase : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase : Union[str, Any] = None
if self.use_token_type_ids:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase : Any = None
__lowercase : Union[str, Any] = None
__lowercase : List[Any] = None
if self.use_labels:
__lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float()
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Union[str, Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCAmelCase ( self : Dict , __a : Tuple , __a : Optional[int] , __a : Optional[Any] , __a : str , __a : List[str] , __a : Dict , __a : List[str] , __a : int , __a : int , ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = FlaubertModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase : Dict = model(lowercase__ , lengths=lowercase__ , langs=lowercase__ )
__lowercase : Dict = model(lowercase__ , langs=lowercase__ )
__lowercase : Union[str, Any] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : List[str] , __a : List[str] , __a : Any , __a : Union[str, Any] , __a : Union[str, Any] , __a : Optional[int] , __a : Any , __a : Optional[int] , __a : List[Any] , __a : List[Any] , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = FlaubertWithLMHeadModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase : Optional[int] = model(lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : int , __a : Any , __a : Dict , __a : Union[str, Any] , __a : Tuple , __a : Union[str, Any] , __a : int , __a : Dict , __a : Tuple , __a : Dict , ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = FlaubertForQuestionAnsweringSimple(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase : List[str] = model(lowercase__ )
__lowercase : Tuple = model(lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Optional[int] , __a : List[Any] , __a : str , __a : List[Any] , __a : Tuple , __a : Tuple , __a : str , __a : Optional[Any] , ) -> str:
"""simple docstring"""
__lowercase : Union[str, Any] = FlaubertForQuestionAnswering(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase : int = model(lowercase__ )
__lowercase : Dict = model(
lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , cls_index=lowercase__ , is_impossible=lowercase__ , p_mask=lowercase__ , )
__lowercase : str = model(
lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , cls_index=lowercase__ , is_impossible=lowercase__ , )
(__lowercase ) : Tuple = result_with_labels.to_tuple()
__lowercase : List[Any] = model(lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ )
(__lowercase ) : Dict = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[Any] , __a : Optional[int] , __a : Any , __a : Union[str, Any] , __a : List[str] , __a : Optional[int] , __a : Tuple , __a : List[Any] , __a : List[str] , ) -> Dict:
"""simple docstring"""
__lowercase : Any = FlaubertForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase : List[str] = model(lowercase__ )
__lowercase : str = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Optional[int] , __a : Any , __a : Tuple , __a : Optional[int] , __a : List[str] , __a : Dict , __a : Any , __a : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Union[str, Any] = FlaubertForTokenClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] , __a : str , __a : List[str] , __a : str , __a : Optional[Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : str , __a : Optional[int] , ) -> int:
"""simple docstring"""
__lowercase : str = self.num_choices
__lowercase : List[Any] = FlaubertForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Dict = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.prepare_config_and_inputs()
        __lowercase : Optional[int] = config_and_inputs
__lowercase : Dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_A : int = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : Dict , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict , __a : Optional[int] ) -> List[str]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCAmelCase ( self : Tuple , __a : Any , __a : List[str] , __a : str=False ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__lowercase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
__lowercase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase : Dict = FlaubertModelTester(self )
__lowercase : Optional[int] = ConfigTester(self , config_class=lowercase__ , emb_dim=37 )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase__ )
def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase__ )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase__ )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase__ )
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase__ )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase__ )
def lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase__ )
@slow
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[str] = FlaubertModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__lowercase : List[str] = True
__lowercase : List[Any] = model_class(config=lowercase__ )
__lowercase : str = self._prepare_for_class(lowercase__ , lowercase__ )
__lowercase : int = torch.jit.trace(
lowercase__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ , os.path.join(lowercase__ , """traced_model.pt""" ) )
__lowercase : Dict = torch.jit.load(os.path.join(lowercase__ , """traced_model.pt""" ) , map_location=lowercase__ )
loaded(inputs_dict["""input_ids"""].to(lowercase__ ) , inputs_dict["""attention_mask"""].to(lowercase__ ) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase : Tuple = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
__lowercase : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
__lowercase : str = model(lowercase__ )[0]
__lowercase : int = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase__ )
__lowercase : List[Any] = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1E-4 ) )
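# NOTE: a minimal sketch of running the same checkpoint end to end with its
# tokenizer; the input sentence is an illustrative assumption.
#
#   from transformers import FlaubertModel, FlaubertTokenizer
#   tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")
#   last_hidden = model(**inputs)[0]   # (batch, seq_len, 768)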
| 703
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
        # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
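# Hedged illustration (not part of the test class above): what the offset
# assertions encode. With a fast tokenizer, trim_offsets=True excludes the
# leading space from each token's character span; trim_offsets=False keeps it.
# The checkpoint id is an assumption for demonstration only.
def _offset_mapping_demo() -> None:
    from transformers import RobertaTokenizerFast

    tok = RobertaTokenizerFast.from_pretrained(
        "roberta-base" , add_prefix_space=True , trim_offsets=True )
    enc = tok("hello hello" , return_offsets_mapping=True , add_special_tokens=False )
    print(enc.offset_mapping )  # e.g. [(0, 5), (6, 11)] - the space is excluded from the spans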
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pipeline_class = IFInpaintingPipeline
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
def get_dummy_components( self ) -> dict:
    """simple docstring"""
    return self._get_dummy_components()
def get_dummy_inputs( self , device , seed=0 ) -> dict:
    """simple docstring"""
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
    mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
    inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
super().test_save_load_float16(expected_max_diff=1E-1 )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
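# Hedged usage sketch (not part of the test class above): how the dummy inputs
# map onto a real IFInpaintingPipeline call. The checkpoint id is an assumption,
# and fp16 weights may additionally require `variant="fp16"`.
def _if_inpainting_demo() -> None:
    pipe = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , torch_dtype=torch.float16 )
    image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) )
    mask_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) )
    result = pipe(
        prompt="A painting of a squirrel eating a burger" ,
        image=image ,
        mask_image=mask_image ,
        num_inference_steps=2 ,
        output_type="numpy" ,
    )
    print(result.images[0].shape )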
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="""gelu""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
    """simple docstring"""
    self.parent = parent
    self.batch_size = 13
    self.seq_length = 7
    self.is_training = True
    self.use_input_mask = True
    self.use_token_type_ids = True
    self.use_labels = True
    self.vocab_size = 99
    self.hidden_size = 384
    self.num_hidden_layers = 2
    self.num_attention_heads = 4
    self.intermediate_size = 37
    self.hidden_act = """gelu"""
    self.hidden_dropout_prob = 0.1
    self.attention_probs_dropout_prob = 0.1
    self.max_position_embeddings = 512
    self.type_vocab_size = 16
    self.type_sequence_label_size = 2
    self.initializer_range = 0.02
    self.num_labels = 3
    self.num_choices = 4
    self.embedding_size = 128
    self.head_ratio = 2
    self.conv_kernel_size = 9
    self.num_groups = 1
    self.scope = None
def prepare_config_and_inputs( self ) -> tuple:
    """simple docstring"""
    input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        choice_labels = ids_tensor([self.batch_size] , self.num_choices )
    config = ConvBertConfig(
        vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
    """simple docstring"""
    model = TFConvBertModel(config=config )
    inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
    inputs = [input_ids, input_mask]
    result = model(inputs )
    result = model(inputs )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
    """simple docstring"""
    model = TFConvBertForMaskedLM(config=config )
    inputs = {
        """input_ids""": input_ids,
        """attention_mask""": input_mask,
        """token_type_ids""": token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
    """simple docstring"""
    config.num_labels = self.num_labels
    model = TFConvBertForSequenceClassification(config=config )
    inputs = {
        """input_ids""": input_ids,
        """attention_mask""": input_mask,
        """token_type_ids""": token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
    """simple docstring"""
    config.num_choices = self.num_choices
    model = TFConvBertForMultipleChoice(config=config )
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
    inputs = {
        """input_ids""": multiple_choice_inputs_ids,
        """attention_mask""": multiple_choice_input_mask,
        """token_type_ids""": multiple_choice_token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
    """simple docstring"""
    config.num_labels = self.num_labels
    model = TFConvBertForTokenClassification(config=config )
    inputs = {
        """input_ids""": input_ids,
        """attention_mask""": input_mask,
        """token_type_ids""": token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
    """simple docstring"""
    model = TFConvBertForQuestionAnswering(config=config )
    inputs = {
        """input_ids""": input_ids,
        """attention_mask""": input_mask,
        """token_type_ids""": token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self ) -> tuple:
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
    return config, inputs_dict
@require_tf
class lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp( self ) -> None:
    """simple docstring"""
    self.model_tester = TFConvBertModelTester(self )
    self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def test_config( self ) -> None:
    """simple docstring"""
    self.config_tester.run_common_tests()
def test_model( self ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_lm( self ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def test_for_question_answering( self ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def test_saved_model_creation_extended( self ) -> None:
    """simple docstring"""
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.output_hidden_states = True
    config.output_attentions = True
    if hasattr(config , """use_cache""" ):
        config.use_cache = True
    encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
    encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )
    for model_class in self.all_model_classes:
        class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
        model = model_class(config )
        num_out = len(model(class_inputs_dict ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname , saved_model=True )
            saved_model_dir = os.path.join(tmpdirname , """saved_model""" , """1""" )
            model = tf.keras.models.load_model(saved_model_dir )
            outputs = model(class_inputs_dict )
            if self.is_encoder_decoder:
                output_hidden_states = outputs["""encoder_hidden_states"""]
                output_attentions = outputs["""encoder_attentions"""]
            else:
                output_hidden_states = outputs["""hidden_states"""]
                output_attentions = outputs["""attentions"""]
            self.assertEqual(len(outputs ) , num_out )
            expected_num_layers = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(output_hidden_states ) , expected_num_layers )
            self.assertListEqual(
                list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
            self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def test_model_from_pretrained( self ) -> None:
    """simple docstring"""
    model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
    self.assertIsNotNone(model )
def test_attention_outputs( self ) -> None:
    """simple docstring"""
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.return_dict = True
    decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
    encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
    decoder_key_length = getattr(self.model_tester , """key_length""" , decoder_seq_length )
    encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )
    def check_decoder_attentions_output(outputs ):
        out_len = len(outputs )
        self.assertEqual(out_len % 2 , 0 )
        decoder_attentions = outputs.decoder_attentions
        self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
    def check_encoder_attentions_output(outputs ):
        attentions = [
            t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
        ]
        self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    for model_class in self.all_model_classes:
        inputs_dict["""output_attentions"""] = True
        config.output_hidden_states = False
        model = model_class(config )
        outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
        out_len = len(outputs )
        self.assertEqual(config.output_hidden_states , False )
        check_encoder_attentions_output(outputs )
        if self.is_encoder_decoder:
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_decoder_attentions_output(outputs )
        # Check that output attentions can also be changed via the config
        del inputs_dict["output_attentions"]
        config.output_attentions = True
        model = model_class(config )
        outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
        self.assertEqual(config.output_hidden_states , False )
        check_encoder_attentions_output(outputs )
        # Check attention is always last and order is fine
        inputs_dict["""output_attentions"""] = True
        config.output_hidden_states = True
        model = model_class(config )
        outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
        self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
        self.assertEqual(model.config.output_hidden_states , True )
        check_encoder_attentions_output(outputs )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def test_inference_masked_lm( self ) -> None:
    """simple docstring"""
    model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
    output = model(input_ids )[0]
    expected_shape = [1, 6, 768]
    self.assertEqual(output.shape , expected_shape )
    expected_slice = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
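# Why the attention-shape assertions above divide num_attention_heads by 2:
# ConvBERT's mixed attention keeps only 1/head_ratio of the heads for
# self-attention, and head_ratio defaults to 2. A minimal sketch (default
# config values assumed):
def _convbert_effective_heads() -> int:
    config = ConvBertConfig(num_attention_heads=4 )
    return config.num_attention_heads // config.head_ratio  # 4 // 2 == 2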
import numpy
class TwoHiddenLayerNeuralNetwork :
'''simple docstring'''
def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ) -> None:
    """simple docstring"""
    self.input_array = input_array
    # Random initial weights are assigned where first argument is the
    # number of nodes in previous layer and second argument is the
    # number of nodes in the next layer.
    # Random initial weights are assigned.
    # self.input_array.shape[1] is used to represent number of nodes in input layer.
    # First hidden layer consists of 4 nodes.
    self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
        self.input_array.shape[1] , 4 )
    # Random initial values for the first hidden layer.
    # First hidden layer has 4 nodes.
    # Second hidden layer has 3 nodes.
    self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
        4 , 3 )
    # Random initial values for the second hidden layer.
    # Second hidden layer has 3 nodes.
    # Output layer has 1 node.
    self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
    # Real output values provided.
    self.output_array = output_array
    # Predicted output values by the neural network.
    # Predicted_output array initially consists of zeroes.
    self.predicted_output = numpy.zeros(output_array.shape )
def feedforward( self ) -> numpy.ndarray:
    """simple docstring"""
    self.layer_between_input_and_first_hidden_layer = sigmoid(
        numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
    # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
    # connecting the first hidden set of nodes with the second hidden set of nodes.
    self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
        numpy.dot(
            self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
    # layer_between_second_hidden_layer_and_output is the layer connecting
    # second hidden layer with the output node.
    self.layer_between_second_hidden_layer_and_output = sigmoid(
        numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
    return self.layer_between_second_hidden_layer_and_output
def back_propagation( self ) -> None:
    """simple docstring"""
    updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
        self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
        * (self.output_array - self.predicted_output)
        * sigmoid_derivative(self.predicted_output ) , )
    updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
        self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
        * sigmoid_derivative(
            self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
    updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
        self.input_array.T , numpy.dot(
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
        * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def train( self , output : numpy.ndarray , iterations : int , give_loss : bool ) -> None:
    """simple docstring"""
    for iteration in range(1 , iterations + 1 ):
        self.predicted_output = self.feedforward()
        self.back_propagation()
        if give_loss:
            loss = numpy.mean(numpy.square(output - self.feedforward() ) )
            print(F"Iteration {iteration} Loss: {loss}" )
def predict( self , input_arr : numpy.ndarray ) -> int:
    """simple docstring"""
    self.array = input_arr
    self.layer_between_input_and_first_hidden_layer = sigmoid(
        numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
    self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
        numpy.dot(
            self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
    self.layer_between_second_hidden_layer_and_output = sigmoid(
        numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid( value : numpy.ndarray ) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative( value : numpy.ndarray ) -> numpy.ndarray:
    return (value) * (1 - (value))
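# Illustrative sanity check (not part of the original file): sigmoid_derivative
# expects an *already activated* value s = sigmoid(x) and returns s * (1 - s),
# which should agree with a central finite difference of sigmoid at x.
def _sigmoid_gradient_check() -> None:
    x = numpy.array([-1.0, 0.0, 2.0] )
    analytic = sigmoid_derivative(sigmoid(x ) )
    numeric = (sigmoid(x + 1e-6 ) - sigmoid(x - 1e-6 )) / 2e-6
    numpy.testing.assert_allclose(analytic , numeric , rtol=1e-4 )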
def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class BeitFeatureExtractor ( BeitImageProcessor ):
'''simple docstring'''
def __init__( self , *args , **kwargs ) -> None:
    """simple docstring"""
    warnings.warn(
        """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
        """ use BeitImageProcessor instead.""" , FutureWarning , )
    super().__init__(*args , **kwargs )
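# Hedged migration sketch (not part of the original shim): use BeitImageProcessor
# directly, as the deprecation warning above recommends. The checkpoint id is an
# assumption for illustration.
def _beit_migration_demo() -> None:
    import numpy as np

    image_processor = BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" )
    image = np.zeros((224, 224, 3) , dtype=np.uint8 )
    inputs = image_processor(images=image , return_tensors="""np""" )
    print(inputs["""pixel_values"""].shape )  # (1, 3, 224, 224)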
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program , timeout , task_id , completion_id ):
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("""timed out""" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute( check_program , result , timeout ):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append("""passed""" )
        except TimeoutException:
            result.append("""timed out""" )
        except BaseException as e:
            result.append(F"failed: {e}" )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    def signal_handler( signum , frame ):
        raise TimeoutException("""Timed out!""" )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io( ):
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
yield
@contextlib.contextmanager
def create_tempdir( ):
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
yield dirname
class TimeoutException ( Exception ):
'''simple docstring'''
pass
class WriteOnlyStringIO ( io.StringIO ):
'''simple docstring'''
def read( self , *args , **kwargs ):
    """simple docstring"""
    raise OSError
def readline( self , *args , **kwargs ):
    """simple docstring"""
    raise OSError
def readlines( self , *args , **kwargs ):
    """simple docstring"""
    raise OSError
def readable( self , *args , **kwargs ):
    """Returns True if the IO object can be read."""
    return False
class redirect_stdin ( contextlib._RedirectStream ):  # type: ignore
    '''simple docstring'''
    _stream = """stdin"""
@contextlib.contextmanager
def chdir( root ):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes=None ):
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["""OMP_NUM_THREADS"""] = """1"""
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None  # kept twice, mirroring the upstream guard
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["""help"""] = None
    import sys
    sys.modules["""ipdb"""] = None
    sys.modules["""joblib"""] = None
    sys.modules["""resource"""] = None
    sys.modules["""psutil"""] = None
    sys.modules["""tkinter"""] = None
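# Hedged usage sketch (not part of the original module): how check_correctness,
# time_limit and swallow_io compose when scoring one candidate program.
def _demo_check_correctness() -> None:
    program = """assert 1 + 1 == 2"""
    outcome = check_correctness(program , timeout=3.0 , task_id="""demo/0""" , completion_id=0 )
    print(outcome )  # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}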
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def setUp( self ) -> None:
    """simple docstring"""
    vocab = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
    vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
    self.add_kwargs_tokens_map = {
        """unk_token""": """<unk>""",
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
    }
    feature_extractor_map = {
        """feature_size""": 1,
        """padding_value""": 0.0,
        """sampling_rate""": 16000,
        """return_attention_mask""": False,
        """do_normalize""": True,
    }
    self.tmpdirname = tempfile.mkdtemp()
    self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
    self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
    with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
        fp.write(json.dumps(vocab_tokens ) + """\n""" )
    with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
        fp.write(json.dumps(feature_extractor_map ) + """\n""" )
    # load decoder from hub
    self.decoder_name = """hf-internal-testing/ngram-beam-search-decoder"""
def get_tokenizer( self , **kwargs_init ) -> WavaVecaCTCTokenizer:
    """simple docstring"""
    kwargs = self.add_kwargs_tokens_map.copy()
    kwargs.update(kwargs_init )
    return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_feature_extractor( self , **kwargs ) -> WavaVecaFeatureExtractor:
    """simple docstring"""
    return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
def get_decoder( self , **kwargs ) -> BeamSearchDecoderCTC:
    """simple docstring"""
    return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
def tearDown( self ) -> None:
    """simple docstring"""
    shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
    """simple docstring"""
    np.random.seed(seed )
    return np.random.rand(*shape )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def get_from_offsets( offsets , key ):
    """simple docstring"""
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
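# Hedged sketch (not part of the test file above): the pool-after-processor
# pattern that the parameterized batch_decode test exercises. The checkpoint id
# comes from the tests themselves; treat everything else as illustrative.
def _pooled_batch_decode_demo(logits) -> list:
    # The pool must be created *after* the processor so that forked workers
    # inherit the decoder's language model.
    processor = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
    with get_context("""fork""" ).Pool() as pool:
        decoded = processor.batch_decode(logits , pool )
    return decoded.text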
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]="attention" ):
__lowercase : List[Any] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
__lowercase : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__lowercase : Optional[Any] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
__lowercase : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__lowercase : str = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
__lowercase : int = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__lowercase : Dict = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
__lowercase : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    if split_mlp_wi:
        wi_0 = params[F"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[F"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[F"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch( variables , *, num_layers , is_encoder_only , scalable_attention=False ):
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {"""/""".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["""shared.weight"""] = old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_attention_layer_norm""" )
        k, o, q, v = tax_attention_lookup(old , i , """encoder""" , """attention""" )
        new[F"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[F"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_mlp_layer_norm""" )
        wi, wo = tax_mlp_lookup(old , i , """encoder""" , split_mlp_wi )
        new[F"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[F"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old , i , """encoder""" ).T
    new["""encoder.final_layer_norm.weight"""] = old["""encoder/encoder_norm/scale"""]
    if not scalable_attention:
        new["""encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
            old , 0 , """encoder""" ).T
        new["""decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
            old , 0 , """decoder""" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_self_attention_layer_norm""" )
            k, o, q, v = tax_attention_lookup(old , i , """decoder""" , """self_attention""" )
            new[F"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[F"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_cross_attention_layer_norm""" )
            k, o, q, v = tax_attention_lookup(old , i , """decoder""" , """encoder_decoder_attention""" )
            new[F"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[F"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_mlp_layer_norm""" )
            wi, wo = tax_mlp_lookup(old , i , """decoder""" , split_mlp_wi )
            new[F"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[F"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old , i , """decoder""" ).T
        new["""decoder.final_layer_norm.weight"""] = old["""decoder/decoder_norm/scale"""]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if """decoder/logits_dense/kernel""" in old:
            new["""lm_head.weight"""] = old["""decoder/logits_dense/kernel"""].T
    return new
def make_state_dict( converted_params , is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["""encoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["""decoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""" )
            state_dict["""lm_head.weight"""] = state_dict["""shared.weight"""]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only=False , scalable_attention=False , ):
    config = MTaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Whether the checkpoint is from an encoder-only model.''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
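# Example invocation, a sketch for illustration only (the script filename and all paths
# below are hypothetical placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --scalable_attention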
def and_gate(input_a: int, input_b: int) -> int:
    # (input_a, input_b).count(0) == 0 holds only when neither input is 0,
    # which is exactly the AND truth table.
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
__lowercase : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Any = {"""unk_token""": """<unk>"""}
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_snake_case ) )
def lowerCAmelCase ( self : Optional[Any] , **__a : str ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Dict ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def lowerCAmelCase ( self : Optional[Any] , __a : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase : Any = """lower newer"""
__lowercase : List[Any] = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Optional[int] = """lower newer"""
__lowercase : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : Any = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
__lowercase : Optional[Any] = tokens + [tokenizer.unk_token]
__lowercase : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=_snake_case ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=_snake_case ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer_class.from_pretrained("""roberta-base""" )
__lowercase : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=_snake_case )
__lowercase : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_snake_case )
__lowercase : Optional[int] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(_snake_case )
__lowercase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Optional[int] = """Encode this sequence."""
__lowercase : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Optional[int] = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
__lowercase : List[Any] = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : Optional[int] = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
__lowercase : Tuple = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
__lowercase : Any = tokenizer.convert_tokens_to_ids(_snake_case )
__lowercase : Optional[Any] = """Encode <mask> sequence"""
__lowercase : Tuple = """Encode <mask>sequence"""
__lowercase : List[Any] = tokenizer.encode(_snake_case )
__lowercase : Any = encoded.index(_snake_case )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
__lowercase : Any = tokenizer.encode(_snake_case )
__lowercase : Dict = encoded.index(_snake_case )
__lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : str = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowercase : List[str] = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowercase : str = """A, <mask> AllenNLP sentence."""
__lowercase : Any = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
__lowercase : List[str] = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
_snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , _snake_case )
self.assertEqual(post_processor_state["""add_prefix_space"""] , _snake_case )
self.assertEqual(post_processor_state["""trim_offsets"""] , _snake_case )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : str = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : Optional[Any] = F"{text_of_1_token} {text_of_1_token}"
__lowercase : Any = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Tuple = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : str = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Tuple = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : str = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Any = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Dict = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Any = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Tuple = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file", default=None, help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command)
return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(lowerCAmelCase_ )
else:
config.to_yaml_file(lowerCAmelCase_ )
print(F"accelerate configuration saved at {config_file}" )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
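# A minimal sketch of how this module is reached in practice (assuming the standard
# `accelerate` CLI entry point): running `accelerate config --config_file ./my_config.yaml`
# walks through the prompts above and writes the answers to ./my_config.yaml; with no
# --config_file flag, the answers land in the default cache location instead.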
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
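# Note: this relies on the Open Graph convention, i.e. pages exposing a tag like
# <meta property="og:image" content="https://.../image.jpg">. On pages without that
# tag, soup.find(...) returns None and the ["content"] lookup raises a TypeError.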
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
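# Illustrative trace: all_construct("abc", ["a", "b", "c", "ab"]) returns
# [["ab", "c"], ["a", "b", "c"]] -- table[i] holds every decomposition of target[:i],
# so the answer is simply table[len(target)] after the reverse step restores
# left-to-right order.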
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
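# Stochastic depth, sketched: the random mask has shape (batch, 1, 1, ...), so entire
# residual branches are dropped per *sample*, and survivors are rescaled by 1/keep_prob
# to keep the expected activation unchanged (e.g. with drop_prob=0.2, kept branches are
# multiplied by 1/0.8 = 1.25).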
class PoolFormerDropPath(nn.Module):
'''simple docstring'''
def __init__( self : Tuple , __a : int = None ) -> None:
"""simple docstring"""
super().__init__()
__lowercase : List[str] = drop_prob
def lowerCAmelCase ( self : Tuple , __a : List[Any] ) -> torch.Tensor:
"""simple docstring"""
return drop_path(_a , self.drop_prob , self.training )
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Tuple , __a : Dict , __a : List[Any] , __a : Dict , __a : Optional[Any]=None ) -> List[str]:
"""simple docstring"""
super().__init__()
__lowercase : int = patch_size if isinstance(_a , collections.abc.Iterable ) else (patch_size, patch_size)
__lowercase : Optional[Any] = stride if isinstance(_a , collections.abc.Iterable ) else (stride, stride)
__lowercase : Dict = padding if isinstance(_a , collections.abc.Iterable ) else (padding, padding)
        __lowercase : Optional[Any] = nn.Conv2d(_a , _a , kernel_size=_a , stride=_a , padding=_a )
__lowercase : str = norm_layer(_a ) if norm_layer else nn.Identity()
def lowerCAmelCase ( self : Optional[int] , __a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = self.projection(_a )
__lowercase : List[Any] = self.norm(_a )
return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Optional[int] , **__a : Tuple ) -> List[Any]:
"""simple docstring"""
super().__init__(1 , _a , **_a )
class PoolFormerPooling(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , __a : Union[str, Any] ) -> int:
"""simple docstring"""
super().__init__()
        __lowercase : Union[str, Any] = nn.AvgPool2d(_a , stride=1 , padding=pool_size // 2 , count_include_pad=_a )
def lowerCAmelCase ( self : Any , __a : int ) -> Tuple:
"""simple docstring"""
return self.pool(_a ) - hidden_states
class PoolFormerOutput(nn.Module):
'''simple docstring'''
def __init__( self : str , __a : Dict , __a : List[Any] , __a : int , __a : List[str] ) -> int:
"""simple docstring"""
super().__init__()
        __lowercase : Tuple = nn.Conv2d(_a , _a , 1 )
        __lowercase : Tuple = nn.Conv2d(_a , _a , 1 )
__lowercase : Any = PoolFormerDropPath(_a )
if isinstance(config.hidden_act , _a ):
            __lowercase : Optional[int] = ACT2FN[config.hidden_act]
else:
__lowercase : Optional[Any] = config.hidden_act
def lowerCAmelCase ( self : List[str] , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : Tuple = self.conva(_a )
__lowercase : int = self.act_fn(_a )
__lowercase : List[str] = self.drop(_a )
__lowercase : Union[str, Any] = self.conva(_a )
__lowercase : Optional[int] = self.drop(_a )
return hidden_states
class PoolFormerLayer(nn.Module):
'''simple docstring'''
def __init__( self : List[Any] , __a : Dict , __a : Optional[Any] , __a : Any , __a : str , __a : Optional[int] , __a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowercase : List[str] = PoolFormerPooling(_a )
__lowercase : int = PoolFormerOutput(_a , _a , _a , _a )
__lowercase : Optional[Any] = PoolFormerGroupNorm(_a )
__lowercase : Optional[Any] = PoolFormerGroupNorm(_a )
# Useful for training neural nets
__lowercase : List[Any] = PoolFormerDropPath(_a ) if drop_path > 0.0 else nn.Identity()
__lowercase : List[str] = config.use_layer_scale
if config.use_layer_scale:
__lowercase : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
__lowercase : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
def lowerCAmelCase ( self : Optional[Any] , __a : Any ) -> Union[str, Any]:
"""simple docstring"""
if self.use_layer_scale:
__lowercase : Optional[Any] = self.pooling(self.before_norm(_a ) )
__lowercase : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
__lowercase : Union[str, Any] = hidden_states + self.drop_path(_a )
__lowercase : List[Any] = ()
__lowercase : List[str] = self.output(self.after_norm(_a ) )
__lowercase : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
__lowercase : List[Any] = hidden_states + self.drop_path(_a )
__lowercase : Union[str, Any] = (output,) + outputs
return outputs
else:
__lowercase : int = self.drop_path(self.pooling(self.before_norm(_a ) ) )
# First residual connection
__lowercase : int = pooling_output + hidden_states
__lowercase : Dict = ()
# Second residual connection inside the PoolFormerOutput block
__lowercase : int = self.drop_path(self.output(self.after_norm(_a ) ) )
__lowercase : Union[str, Any] = hidden_states + layer_output
__lowercase : Dict = (output,) + outputs
return outputs
class PoolFormerEncoder(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , __a : Tuple ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase : Union[str, Any] = config
# stochastic depth decay rule
__lowercase : Union[str, Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
__lowercase : Optional[Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
__lowercase : Optional[int] = nn.ModuleList(_a )
# Transformer blocks
__lowercase : Optional[Any] = []
__lowercase : Tuple = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
__lowercase : List[Any] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_a ) )
__lowercase : List[Any] = nn.ModuleList(_a )
def lowerCAmelCase ( self : List[str] , __a : List[Any] , __a : str=False , __a : Dict=True ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = () if output_hidden_states else None
__lowercase : List[str] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
__lowercase : Union[str, Any] = layers
# Get patch embeddings from hidden_states
__lowercase : str = embedding_layer(_a )
# Send the embeddings through the blocks
for _, blk in enumerate(_a ):
__lowercase : Optional[Any] = blk(_a )
__lowercase : List[str] = layer_outputs[0]
if output_hidden_states:
__lowercase : Any = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a , hidden_states=_a )
class PoolFormerPreTrainedModel(PreTrainedModel):
'''simple docstring'''
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def lowerCAmelCase ( self : Dict , __a : Tuple ) -> Tuple:
"""simple docstring"""
        if isinstance(_a , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def lowerCAmelCase ( self : Optional[Any] , __a : List[str] , __a : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(_a , _a ):
__lowercase : List[str] = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
'''simple docstring'''
def __init__( self : str , __a : int ) -> List[str]:
"""simple docstring"""
super().__init__(_a )
__lowercase : Any = config
__lowercase : Union[str, Any] = PoolFormerEncoder(_a )
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self : Tuple , __a : Dict = None , __a : List[Any] = None , __a : Tuple = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
__lowercase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
__lowercase : Any = self.encoder(
_a , output_hidden_states=_a , return_dict=_a , )
__lowercase : Optional[int] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_a , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> Tuple:
"""simple docstring"""
super().__init__()
__lowercase : List[str] = nn.Linear(config.hidden_size , config.hidden_size )
def lowerCAmelCase ( self : List[str] , __a : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.dense(_a )
return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(_a )
__lowercase : Tuple = config.num_labels
__lowercase : Optional[Any] = PoolFormerModel(_a )
# Final norm
__lowercase : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
__lowercase : str = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self : Tuple , __a : Any = None , __a : Union[str, Any] = None , __a : List[Any] = None , __a : Optional[int] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
__lowercase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase : Optional[Any] = self.poolformer(
_a , output_hidden_states=_a , return_dict=_a , )
__lowercase : Tuple = outputs[0]
__lowercase : Any = self.classifier(self.norm(_a ).mean([-2, -1] ) )
__lowercase : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowercase : Union[str, Any] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowercase : Optional[int] = """single_label_classification"""
else:
__lowercase : List[Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
__lowercase : Union[str, Any] = MSELoss()
if self.num_labels == 1:
__lowercase : str = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowercase : Optional[Any] = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
__lowercase : List[str] = CrossEntropyLoss()
__lowercase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowercase : List[Any] = BCEWithLogitsLoss()
__lowercase : Dict = loss_fct(_a , _a )
if not return_dict:
__lowercase : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
def or_gate(input_a: int, input_b: int) -> int:
    # OR outputs 1 when at least one input is 1.
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
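# Example invocation, for illustration only (the script filename is a hypothetical
# placeholder): converting the small text model would look like
#   python convert_suno_to_hf.py text ./bark_text_small --is_small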
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
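# `_LazyModule` swaps itself in for this module in `sys.modules`, so the heavy
# torch/TF submodules listed in `_import_structure` are only imported when one of
# the exported names is first accessed; `import transformers` itself stays cheap.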
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = "3.0.12"

_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(OSError):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class _Acquire_ReturnProxy:
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class BaseFileLock:
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class WindowsFileLock(BaseFileLock):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock(BaseFileLock):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class SoftFileLock(BaseFileLock):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('''only soft file lock is available''')
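# Minimal usage sketch (not part of the original module; the lock path is
# illustrative). The platform-appropriate class selected above is a context
# manager, so acquisition and release happen on enter and exit:
#   lock = FileLock("/tmp/my_app.lock", timeout=10)
#   with lock:
#       ...  # exclusive access to the shared resource; released on exit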
| 649
| 0
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Dict = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key( k : str ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k
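# A worked sanity check of the renaming above (the key is an illustrative example):
assert rename_state_dict_key("encoder.attention.q_lin.weight") == "encoder.self_attn.q_proj.weight"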
def rename_layernorm_keys( sd : dict ):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str , config_json_path : str ):
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
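# Example invocation (the script and file names here are illustrative):
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json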
| 713
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMvaConfig ( PretrainedConfig ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class LayoutLMvaOnnxConfig ( OnnxConfig ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
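# A sketch of driving the dummy-input generation above by hand (the processor
# checkpoint is illustrative, and the TensorType usage is an assumption based
# on the upstream ONNX export API):
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   onnx_config = LayoutLMvaOnnxConfig(LayoutLMvaConfig(), task="question-answering")
#   dummy = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
#   # -> dict with input_ids, attention_mask, bbox and pixel_values tensors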
| 649
| 0
|
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
def convert_classification( base_model_name : str , hf_config , downstream_dict : dict ):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization( base_model_name : str , hf_config , downstream_dict : dict ):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector( base_model_name : str , hf_config , downstream_dict : dict ):
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name : str , config_path : str , checkpoint_path : str , model_dump_path : str ):
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForXVector""" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
lowerCamelCase : Any = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
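# Example invocation (file names are illustrative):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model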
| 714
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
                logger.warning(
                    F"User input was added while unprocessed input still existed: \"{self.new_user_input}\" was "
                    F"overwritten with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
                logger.warning(
                    F"User input was added while unprocessed input still existed: \"{self.new_user_input}\" was kept and "
                    F"the new input ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input." )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
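# Minimal usage sketch of the conversation container above (texts are illustrative):
#   conversation = Conversation("Hi there!")
#   conversation.mark_processed()          # moves the text into past_user_inputs
#   conversation.append_response("Hello! How can I help?")
#   print(conversation)                    # "user >> Hi there!" then "bot >> Hello! ..."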
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline ( Pipeline ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
            raise ValueError("""ConversationalPipeline expects a Conversation as input""" )
if conversation.new_user_input is None:
raise ValueError(
                F"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
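# End-to-end usage sketch (the checkpoint name is illustrative):
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="facebook/blenderbot-400M-distill")
#   conversation = Conversation("What's the best way to learn Python?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])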
| 649
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester :
'''simple docstring'''
def __init__( self : List[Any] , __a : List[Any] , __a : Optional[Any]=13 , __a : Optional[int]=7 , __a : Tuple=True , __a : Union[str, Any]=True , __a : List[Any]=True , __a : str=True , __a : Tuple=99 , __a : Union[str, Any]=64 , __a : Tuple=32 , __a : Optional[int]=5 , __a : int=4 , __a : Optional[int]=37 , __a : str="gelu" , __a : List[str]=0.1 , __a : str=0.1 , __a : List[Any]=512 , __a : str=16 , __a : int=2 , __a : List[str]=0.02 , __a : List[Any]=3 , __a : Optional[int]=4 , __a : Tuple=None , ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Union[str, Any] = batch_size
__lowercase : Tuple = seq_length
__lowercase : Union[str, Any] = is_training
__lowercase : Tuple = use_input_mask
__lowercase : Union[str, Any] = use_token_type_ids
__lowercase : Union[str, Any] = use_labels
__lowercase : Dict = vocab_size
__lowercase : Union[str, Any] = hidden_size
__lowercase : str = embedding_size
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : Union[str, Any] = num_attention_heads
__lowercase : str = intermediate_size
__lowercase : Optional[Any] = hidden_act
__lowercase : List[str] = hidden_dropout_prob
__lowercase : List[str] = attention_probs_dropout_prob
__lowercase : List[Any] = max_position_embeddings
__lowercase : Optional[Any] = type_vocab_size
__lowercase : Optional[int] = type_sequence_label_size
__lowercase : List[Any] = initializer_range
__lowercase : Any = num_labels
__lowercase : Optional[int] = num_choices
__lowercase : int = scope
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Union[str, Any] = None
if self.use_input_mask:
__lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Optional[Any] = None
if self.use_token_type_ids:
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : List[str] = None
__lowercase : Dict = None
__lowercase : Union[str, Any] = None
if self.use_labels:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : int = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[Any] , __a : List[Any] , __a : Any , __a : List[Any] , __a : Any , __a : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = MegatronBertModel(config=__A )
model.to(__A )
model.eval()
__lowercase : Optional[Any] = model(__A , attention_mask=__A , token_type_ids=__A )
__lowercase : str = model(__A , token_type_ids=__A )
__lowercase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Any , __a : Union[str, Any] , __a : List[str] , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : int , __a : int ) -> Any:
"""simple docstring"""
__lowercase : Dict = MegatronBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
__lowercase : Dict = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : str , __a : Tuple , __a : List[Any] , __a : Any , __a : Optional[Any] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MegatronBertForCausalLM(config=__A )
model.to(__A )
model.eval()
__lowercase : Dict = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[Any] , __a : Dict , __a : List[str] , __a : List[str] , __a : List[Any] , __a : Optional[int] , __a : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = MegatronBertForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
__lowercase : Optional[int] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[Any] , __a : str , __a : int , __a : List[str] , __a : Optional[Any] , __a : List[Any] , __a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = MegatronBertForPreTraining(config=__A )
model.to(__A )
model.eval()
__lowercase : str = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : Union[str, Any] , __a : int ) -> int:
"""simple docstring"""
__lowercase : Tuple = MegatronBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__lowercase : Dict = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : str , __a : Union[str, Any] , __a : Any , __a : Optional[Any] , __a : Union[str, Any] , __a : str , __a : int , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Dict = self.num_labels
__lowercase : str = MegatronBertForSequenceClassification(__A )
model.to(__A )
model.eval()
__lowercase : Union[str, Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : List[Any] , __a : Union[str, Any] , __a : List[str] , __a : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.num_labels
__lowercase : Optional[Any] = MegatronBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
__lowercase : Tuple = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Dict , __a : str , __a : List[str] , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.num_choices
__lowercase : Optional[Any] = MegatronBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__lowercase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Tuple = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Any = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_A : str = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : List[Any] = True
# test_resize_embeddings = False
_A : str = False
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : List[Any] , __a : Optional[int]=False ) -> List[Any]:
"""simple docstring"""
__lowercase : int = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
__lowercase : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
__lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = MegatronBertModelTester(self )
__lowercase : Union[str, Any] = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__A )
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__A )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__A )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__A )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__A )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__A )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__A )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__A )
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
lowerCamelCase : Tuple = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
__lowercase : str = os.path.join(os.environ["""MYDIR"""] , __A )
__lowercase : str = MegatronBertModel.from_pretrained(__A )
model.to(__A )
model.half()
__lowercase : Dict = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
__lowercase : str = model(__A )[0]
__lowercase : Union[str, Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __A )
__lowercase : Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
__lowercase : List[str] = output[0, ii, jj]
__lowercase : Optional[int] = expected[3 * ii + jj]
__lowercase : List[Any] = """ii={} jj={} a={} b={}""".format(__A , __A , __A , __A )
self.assertTrue(math.isclose(__A , __A , rel_tol=__A , abs_tol=__A ) , msg=__A )
| 715
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class MobileNetVaModelTester :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
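# Standalone inference sketch mirroring the slow classification test above
# (identifiers follow this file's naming; the checkpoint is the one used in the test):
#   processor = MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])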
| 649
| 0
|
def optimal_merge_pattern( files : list ) -> float:
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
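# A worked example (not from the original file): merging [2, 3, 4] combines
# 2 + 3 at cost 5, then 5 + 4 at cost 9, for a minimum total of 14.
assert optimal_merge_pattern([2, 3, 4]) == 14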
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
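# Usage sketch: `main_process_only` is the first positional parameter, so the
# iterable is passed after it (call shape per the signature above):
#   for step in tqdm(True, range(100)):
#       ...  # the progress bar renders only on the main process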
| 649
| 0
|
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase : Optional[Any] = '''pytorch_model.bin'''
lowerCamelCase : Optional[Any] = '''pytorch_model.bin.index.json'''
lowerCamelCase : Optional[Any] = '''adapter_config.json'''
lowerCamelCase : int = '''adapter_model.bin'''
lowerCamelCase : Union[str, Any] = '''adapter_model.safetensors'''
lowerCamelCase : Tuple = '''tf_model.h5'''
lowerCamelCase : Optional[int] = '''tf_model.h5.index.json'''
lowerCamelCase : Any = '''model.ckpt'''
lowerCamelCase : List[str] = '''flax_model.msgpack'''
lowerCamelCase : str = '''flax_model.msgpack.index.json'''
lowerCamelCase : Tuple = '''model.safetensors'''
lowerCamelCase : Optional[Any] = '''model.safetensors.index.json'''
lowerCamelCase : Tuple = '''config.json'''
lowerCamelCase : Dict = '''preprocessor_config.json'''
lowerCamelCase : List[str] = FEATURE_EXTRACTOR_NAME
lowerCamelCase : Tuple = '''generation_config.json'''
lowerCamelCase : Dict = '''modelcard.json'''
lowerCamelCase : List[Any] = '''▁'''
lowerCamelCase : Tuple = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase : List[Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase : str = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase : Optional[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
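# Typical usage of the version check defined below, placed at the top of an
# example script (the version string is illustrative):
#   check_min_version("4.30.0.dev0")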
def check_min_version( min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                'This example requires a source install from HuggingFace Transformers (see '
                '`https://huggingface.co/docs/transformers/installation#install-from-source`),'
            )
        else:
            error_message = F"This example requires a minimum version of {min_version},"
        error_message += F" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 717
|
from __future__ import annotations
def maximum_non_adjacent_sum( nums : list[int] ) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
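# A worked example (not from the original file): for [2, 7, 9, 3, 1] the best
# non-adjacent selection is 2 + 9 + 1 = 12.
assert maximum_non_adjacent_sum([2, 7, 9, 3, 1]) == 12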
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig ( PretrainedConfig ):
'''simple docstring'''
_A : int = '''marian'''
_A : Any = ['''past_key_values''']
_A : List[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Union[str, Any] , __a : Any=58101 , __a : Optional[Any]=None , __a : List[str]=1024 , __a : Any=12 , __a : Tuple=4096 , __a : Union[str, Any]=16 , __a : Tuple=12 , __a : Optional[int]=4096 , __a : List[Any]=16 , __a : Optional[Any]=0.0 , __a : Optional[Any]=0.0 , __a : Optional[int]=True , __a : str=True , __a : Tuple="gelu" , __a : str=1024 , __a : str=0.1 , __a : Dict=0.0 , __a : List[str]=0.0 , __a : List[Any]=0.02 , __a : int=58100 , __a : Tuple=False , __a : Optional[int]=58100 , __a : int=0 , __a : Optional[int]=0 , __a : Optional[int]=True , **__a : Dict , ) -> Dict:
"""simple docstring"""
__lowercase : str = vocab_size
__lowercase : Any = decoder_vocab_size or vocab_size
__lowercase : Optional[int] = max_position_embeddings
__lowercase : Dict = d_model
__lowercase : Any = encoder_ffn_dim
__lowercase : List[Any] = encoder_layers
__lowercase : Optional[Any] = encoder_attention_heads
__lowercase : str = decoder_ffn_dim
__lowercase : Any = decoder_layers
__lowercase : Optional[Any] = decoder_attention_heads
__lowercase : Any = dropout
__lowercase : Optional[int] = attention_dropout
__lowercase : Union[str, Any] = activation_dropout
__lowercase : Tuple = activation_function
__lowercase : Any = init_std
__lowercase : Optional[Any] = encoder_layerdrop
__lowercase : int = decoder_layerdrop
__lowercase : List[str] = use_cache
__lowercase : List[str] = encoder_layers
__lowercase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
__lowercase : Optional[Any] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
class MarianOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowercase : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowercase : Optional[int] = {0: """batch"""}
__lowercase : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__lowercase : Any = {0: """batch""", 1: """decoder_sequence"""}
__lowercase : int = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__A , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase : Any = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowercase , __lowercase : int = self.num_layers
for i in range(__A ):
__lowercase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
__lowercase : Any = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__lowercase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
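
# Usage sketch (illustrative only; sizes below are made-up example values, not Marian
# defaults): the dummy past_key_values built above follow the shape convention
# (batch, num_attention_heads, past_sequence_length, head_dim) per layer.
if __name__ == "__main__":
    import torch

    batch, num_heads, past_len, hidden_size = 2, 4, 8, 64
    head_dim = hidden_size // num_heads
    past_key_values = [
        (torch.zeros(batch, num_heads, past_len, head_dim), torch.zeros(batch, num_heads, past_len, head_dim))
        for _ in range(6)  # one (key, value) pair per layer
    ]
    print(past_key_values[0][0].shape)  # torch.Size([2, 4, 8, 16])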
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
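
# Usage sketch (illustrative comments only; the checkpoint name is an example):
# downstream code relies on the guard pattern above, so each import either resolves
# to a real class or to a dummy placeholder that raises a clear error when used.
#
#     from diffusers import StableDiffusionPipeline  # needs torch + transformers
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#
# If an optional backend is missing, the corresponding dummy object fails with a
# helpful message at call time instead of breaking `import diffusers` itself.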
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
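
# Usage sketch (illustrative; this helper is hypothetical and not called by the
# conversion): read_in_q_k_v slices the fused qkv projection of shape
# (3 * hidden_size, hidden_size) into equal query/key/value blocks, exactly as the
# index arithmetic above does.
def _demo_qkv_split(hidden_size=8):
    import torch

    qkv = torch.randn(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value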
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We verify the conversion on an image from the COCO validation set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy of a body: KE = 0.5 * mass * velocity**2."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
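
# Worked example (illustrative): a 10 kg body moving at 5 m/s carries
# 0.5 * 10 * 5**2 = 125 joules of kinetic energy.
assert kinetic_energy(10, 5) == 125.0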
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping embeddings to class logits."""

    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
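
# Usage sketch (illustrative sizes only): map a batch of 768-dim hidden states to
# logits over 5 classes.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden = torch.randn(2, 768)  # (batch, embed_size)
    print(head(hidden).shape)  # torch.Size([2, 5])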
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily swap feature_size so spectrogram targets pad to num_mel_bins
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
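
# Usage sketch (illustrative; the checkpoint name is an example and loading it needs
# network access): `text` is routed to the tokenizer while `audio_target` goes to the
# feature extractor, whose output comes back under the `labels` key.
if __name__ == "__main__":
    import numpy as np

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    inputs = processor(
        text="Hello world", audio_target=np.zeros(16000), sampling_rate=16000, return_tensors="pt"
    )
    print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'decoder_attention_mask', 'input_ids', 'labels']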
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
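
# Usage sketch (illustrative; the file names are hypothetical). Fire exposes the
# function as a CLI, so assuming this module is saved as rouge_cli.py:
#
#     python rouge_cli.py pred_summaries.txt gold_summaries.txt --save_path metrics.json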
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Virtual filesystem over the files of a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
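# Usage sketch (illustrative; needs network access, and "squad" is just an example
# dataset id): the filesystem serves listings from the repo metadata cached by
# _get_dirs().
if __name__ == "__main__":
    from huggingface_hub import HfApi

    info = HfApi().dataset_info("squad")
    fs = HfFileSystem(repo_info=info)
    print(fs.ls(""))  # top-level files of the dataset repository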
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
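
# Usage sketch (illustrative; the model name is an example): once registered on the
# CLI parser, the command is invoked as
#
#     transformers-cli download bert-base-uncased --cache-dir ./models --force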
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
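
# Usage sketch (illustrative sizes): build a small config and read a couple of fields.
if __name__ == "__main__":
    config = BertGenerationConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    print(config.model_type, config.hidden_size, config.use_cache)  # bert-generation 64 True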
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
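# Hedged usage sketch (not part of the test suite): distills the generation
# test above into a point-forecast helper. The checkpoint name, batch keys,
# and `prepare_batch` all come from this file; the function name and the idea
# of returning the sample mean are illustrative.
def _demo_autoformer_point_forecast():
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            future_time_features=batch["future_time_features"],
            past_observed_mask=batch["past_observed_mask"],
        )
    # outputs.sequences has shape (batch, num_parallel_samples, prediction_length);
    # averaging over the sample dimension gives one point forecast per series.
    return outputs.sequences.mean(dim=1)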
| 649
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase : Tuple = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase : Optional[int] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase : int = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase : Optional[int] = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase : List[Any] = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase : Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase : Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase : Any = []
lowerCamelCase : Any = []
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] ):
for attribute in key.split(""".""" ):
__lowercase : Dict = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if weight_type is not None:
__lowercase : Tuple = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).shape
else:
__lowercase : Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowercase : List[str] = value
elif weight_type == "weight_g":
__lowercase : int = value
elif weight_type == "weight_v":
__lowercase : Optional[int] = value
elif weight_type == "bias":
__lowercase : Tuple = value
elif weight_type == "running_mean":
__lowercase : Union[str, Any] = value
elif weight_type == "running_var":
__lowercase : str = value
elif weight_type == "num_batches_tracked":
__lowercase : Any = value
elif weight_type == "weight_ih_l0":
__lowercase : Union[str, Any] = value
elif weight_type == "weight_hh_l0":
__lowercase : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
__lowercase : Union[str, Any] = value
elif weight_type == "bias_hh_l0":
__lowercase : Union[str, Any] = value
elif weight_type == "weight_ih_l1":
__lowercase : int = value
elif weight_type == "weight_hh_l1":
__lowercase : List[Any] = value
elif weight_type == "bias_ih_l1":
__lowercase : Any = value
elif weight_type == "bias_hh_l1":
__lowercase : Tuple = value
else:
__lowercase : Tuple = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ):
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowercase : Union[str, Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
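# Illustration of the wildcard matching implemented above: a key ending in
# ".*" matches any name with that prefix, a key containing ".*." matches when
# both its prefix and suffix occur in the name, and any other key matches by
# substring. For example (assuming such keys were placed in IGNORE_KEYS):
#   should_ignore("encoder.model.0.conv.weight", ["encoder.model.0.*"])    -> True
#   should_ignore("quantizer.vq.layers.2._codebook.embed", ["vq.*.embed"]) -> True
#   should_ignore("decoder.model.1.lstm.bias_ih_l0", ["lstm"])             -> True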
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] ):
__lowercase : Tuple = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
__lowercase : List[Any] = MAPPING_24K
elif model_name == "encodec_48khz":
__lowercase : int = MAPPING_48K
else:
raise ValueError(F"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.info(F"{name} was ignored" )
continue
__lowercase : int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
__lowercase : int = key.split(""".*.""" )
if prefix in name and suffix in name:
__lowercase : int = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
__lowercase : Any = True
if "*" in mapped_key:
__lowercase : Optional[Any] = name.split(SCREAMING_SNAKE_CASE_ )[0].split(""".""" )[-2]
__lowercase : Optional[Any] = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE_ )
if "weight_g" in name:
__lowercase : Any = "weight_g"
elif "weight_v" in name:
__lowercase : Tuple = "weight_v"
elif "weight_ih_l0" in name:
__lowercase : str = "weight_ih_l0"
elif "weight_hh_l0" in name:
__lowercase : Any = "weight_hh_l0"
elif "bias_ih_l0" in name:
__lowercase : Tuple = "bias_ih_l0"
elif "bias_hh_l0" in name:
__lowercase : int = "bias_hh_l0"
elif "weight_ih_l1" in name:
__lowercase : List[Any] = "weight_ih_l1"
elif "weight_hh_l1" in name:
__lowercase : Optional[Any] = "weight_hh_l1"
elif "bias_ih_l1" in name:
__lowercase : Union[str, Any] = "bias_ih_l1"
elif "bias_hh_l1" in name:
__lowercase : Optional[Any] = "bias_hh_l1"
elif "bias" in name:
__lowercase : Any = "bias"
elif "weight" in name:
__lowercase : Dict = "weight"
elif "running_mean" in name:
__lowercase : Tuple = "running_mean"
elif "running_var" in name:
__lowercase : int = "running_var"
elif "num_batches_tracked" in name:
__lowercase : List[str] = "num_batches_tracked"
else:
__lowercase : Dict = None
set_recursively(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
logger.warning(F"Unused weights: {unused_weights}" )
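# Example of the renaming performed above (illustrative):
#   "encoder.model.1.block.1.conv.conv.weight_g"
#       -> hf key "encoder.layers.1.block.1.conv", weight_type "weight_g"
#   "quantizer.vq.layers.2._codebook.embed"
#       -> "quantizer.layers.2.codebook.embed" (the "*" wildcard captures the
#          layer index "2" from the original name)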
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Any]=None , ):
if config_path is not None:
__lowercase : int = EncodecConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
__lowercase : Tuple = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__lowercase : Any = [8, 5, 4, 4]
__lowercase : List[Any] = [2.2]
__lowercase : Optional[Any] = 64
__lowercase : Optional[Any] = 32000
__lowercase : int = 2048
__lowercase : int = False
__lowercase : int = False
__lowercase : Union[str, Any] = False
elif model_name == "encodec_48khz":
__lowercase : Any = [8, 5, 4, 2]
__lowercase : Tuple = [3.0, 6.0, 12.0, 24.0]
__lowercase : Any = 48000
__lowercase : List[Any] = 2
__lowercase : List[Any] = False
__lowercase : Optional[int] = "time_group_norm"
__lowercase : Optional[int] = True
__lowercase : Any = 1.0
__lowercase : Optional[Any] = 0.01
else:
raise ValueError(F"Unknown model name: {model_name}" )
__lowercase : str = EncodecModel(SCREAMING_SNAKE_CASE_ )
__lowercase : List[str] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
__lowercase : int = torch.load(SCREAMING_SNAKE_CASE_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__lowercase : Optional[int] = original_checkpoint["best_state"]
recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(SCREAMING_SNAKE_CASE_ )
model.push_to_hub(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
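# Example invocation (illustrative; the script filename is an assumption, and
# the checkpoint filename must match one of the URLs listed at the top of this
# file — the flags themselves are the ones defined by the argparse setup above):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted \
#       --push_to_hub my-user/encodec-24khz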
| 702
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
_A : str = 1
@register_to_config
def __init__( self : Optional[int] , __a : Tuple=2000 , __a : List[str]=0.1 , __a : str=20 , __a : Optional[int]=1E-3 ) -> int:
"""simple docstring"""
__lowercase : Tuple = None
__lowercase : Union[str, Any] = None
__lowercase : int = None
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Union[str, torch.device] = None ) -> str:
"""simple docstring"""
__lowercase : List[str] = torch.linspace(1 , self.config.sampling_eps , __a , device=__a )
def lowerCAmelCase ( self : Tuple , __a : List[Any] , __a : Tuple , __a : int , __a : Optional[int]=None ) -> str:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowercase : Dict = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowercase : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowercase : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowercase : Optional[Any] = std.unsqueeze(-1 )
__lowercase : List[Any] = -score / std
# compute
__lowercase : Dict = -1.0 / len(self.timesteps )
__lowercase : int = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowercase : List[Any] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowercase : Union[str, Any] = beta_t.unsqueeze(-1 )
__lowercase : List[str] = -0.5 * beta_t * x
__lowercase : int = torch.sqrt(__a )
__lowercase : Union[str, Any] = drift - diffusion**2 * score
__lowercase : Optional[Any] = x + drift * dt
# add noise
__lowercase : List[str] = randn_tensor(x.shape , layout=x.layout , generator=__a , device=x.device , dtype=x.dtype )
__lowercase : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
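# Hedged usage sketch: in the upstream diffusers API this class is
# ScoreSdeVpScheduler and the two methods above are `set_timesteps` and
# `step_pred`; those names, and the `score_model` callable, are assumptions
# made for illustration only.
def _demo_sde_vp_sampling(score_model, shape=(1, 3, 32, 32), num_inference_steps=1000):
    scheduler = ScoreSdeVpScheduler()          # defaults from @register_to_config
    scheduler.set_timesteps(num_inference_steps)
    x = randn_tensor(shape)
    for t in scheduler.timesteps:
        score = score_model(x, t)              # hypothetical score network
        x, x_mean = scheduler.step_pred(score, t, x)
    return x_mean                              # final denoised mean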
| 649
| 0
|
import requests
from bs4 import BeautifulSoup


def world_covid_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the Worldometers page and return a mapping of statistic name to value."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid_stats().items():
        print(f"{key}\n{value}\n")
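# Illustrative shape of the mapping returned by world_covid_stats() — values
# are placeholders, since the live page changes constantly:
#   {"Coronavirus Cases:": "<count>", "Deaths:": "<count>", "Recovered:": "<count>", ...}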
| 703
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
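# Hedged sketch of the offset-mapping behaviour the last test asserts, written
# against the public checkpoint used earlier in this file; the exact offsets
# assume `add_prefix_space=True, trim_offsets=True`.
def _demo_longformer_offsets():
    tok = LongformerTokenizerFast.from_pretrained(
        "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
    )
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    # trim_offsets excludes the separating space from the second token's span:
    # enc.offset_mapping == [(0, 5), (6, 11)] rather than [(0, 5), (5, 11)]
    return enc.offset_mapping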
| 649
| 0
|
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """A stack whose ordering is determined by its top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort *collection* in place using patience sort and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
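# Why this works (sketch): bisect_left places each element on the leftmost
# stack whose top is >= the element, so every stack is non-increasing from
# bottom to top; reversing a stack therefore yields a sorted run, and
# heapq.merge combines the k runs in O(n log k).
# Example: patience_sort([5, 1, 4, 2, 3]) -> [1, 2, 3, 4, 5]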
| 704
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
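# Minimal inference sketch mirroring the integration test above (the checkpoint
# name, input ids, and expected hidden-state shape are all taken from the test;
# only the helper function itself is illustrative):
def _demo_convbert_hidden_states():
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    hidden_states = model(input_ids)[0]   # shape (1, 6, 768)
    return hidden_states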
| 649
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = tempfile.mkdtemp()
# fmt: off
__lowercase : List[str] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowercase : Any = dict(zip(_A , range(len(_A ) ) ) )
__lowercase : str = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__lowercase : List[Any] = {"""unk_token""": """<unk>"""}
__lowercase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_A ) )
__lowercase : Optional[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
__lowercase : Any = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_A , _A )
def lowerCAmelCase ( self : str , **__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_A )
def lowerCAmelCase ( self : Union[str, Any] , **__a : int ) -> int:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_A )
def lowerCAmelCase ( self : Optional[Any] , **__a : int ) -> List[str]:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_A )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__lowercase : int = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : str = self.get_tokenizer()
__lowercase : int = self.get_rust_tokenizer()
__lowercase : Any = self.get_image_processor()
__lowercase : Tuple = CLIPProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__lowercase : Union[str, Any] = CLIPProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase : List[str] = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
__lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase : int = self.get_image_processor()
__lowercase : Any = self.get_tokenizer()
__lowercase : Optional[Any] = CLIPProcessor(tokenizer=_A , image_processor=_A )
__lowercase : Dict = self.prepare_image_inputs()
__lowercase : List[str] = image_processor(_A , return_tensors="""np""" )
__lowercase : Tuple = processor(images=_A , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.get_image_processor()
__lowercase : Any = self.get_tokenizer()
__lowercase : str = CLIPProcessor(tokenizer=_A , image_processor=_A )
__lowercase : Optional[int] = """lower newer"""
__lowercase : List[Any] = processor(text=_A )
__lowercase : Union[str, Any] = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = self.get_image_processor()
__lowercase : List[str] = self.get_tokenizer()
__lowercase : List[str] = CLIPProcessor(tokenizer=_A , image_processor=_A )
__lowercase : Optional[Any] = """lower newer"""
__lowercase : Any = self.prepare_image_inputs()
__lowercase : Optional[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.get_image_processor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[Any] = CLIPProcessor(tokenizer=_A , image_processor=_A )
__lowercase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : Optional[int] = processor.batch_decode(_A )
__lowercase : Any = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = self.get_image_processor()
__lowercase : Any = self.get_tokenizer()
__lowercase : Tuple = CLIPProcessor(tokenizer=_A , image_processor=_A )
__lowercase : int = """lower newer"""
__lowercase : Tuple = self.prepare_image_inputs()
__lowercase : Union[str, Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
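# Hedged end-to-end sketch of the joint text+image path these tests exercise,
# using a public checkpoint instead of the temp-dir fixtures; the checkpoint
# name "openai/clip-vit-base-patch32" is an assumption (any CLIP checkpoint
# with a saved processor should behave the same).
def _demo_clip_processor():
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text=["lower newer"], images=image, return_tensors="np")
    # keys match the assertion above: input_ids, attention_mask, pixel_values
    return inputs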
| 705
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
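# Migration implied by the deprecation warning above (the checkpoint name is
# illustrative):
#   from transformers import BeitImageProcessor   # instead of BeitFeatureExtractor
#   image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")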
| 649
| 0
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
    references: list of references, one per prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
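# A minimal usage sketch (illustrative): with `use_aggregator=False` the metric
# returns one `Score(precision, recall, fmeasure)` tuple per example instead of
# bootstrap aggregates.
#
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(
#         predictions=["hello there"], references=["hello there"], use_aggregator=False
#     )
#     print(results["rouge1"][0].fmeasure)  # 1.0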
| 706
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        """simple docstring"""
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        """simple docstring"""
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        """simple docstring"""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        """simple docstring"""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)
        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_decoder_tokenizer_mismatch(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        """simple docstring"""
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
        )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
            )
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)
        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))
        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True
        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)
    def test_decoder_download_ignores_files(self):
        """simple docstring"""
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        """simple docstring"""
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        """simple docstring"""
        processor_wav2vec2 = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")
        raw_speech = floats_list((3, 1000))
        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")
        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)
        logits = self._get_dummy_logits()
        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)
        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        """simple docstring"""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        """simple docstring"""
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))
        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_batch_integration_fast(self):
        """simple docstring"""
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))
        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        """simple docstring"""
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)
        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values
        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()
        output = processor.decode(logits[0], output_word_offsets=True)
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]
        expected_text = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), expected_text)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on
        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
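# A minimal usage sketch (illustrative; mirrors the tests above with the public
# `hf-internal-testing/processor_with_lm` checkpoint): decoding a (time, vocab)
# numpy array of logits with the LM-boosted beam search.
#
#     processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
#     output = processor.decode(logits, output_word_offsets=True)  # `logits` assumed defined
#     print(output.text, output.lm_score)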
| 649
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
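# A minimal usage sketch (illustrative): the default config derives the
# two-stage downsampling schedule from `key_dim` and `hidden_sizes`.
#
#     config = LevitConfig()
#     assert config.down_ops[0] == ["Subsample", 16, 8, 4, 2, 2]  # 128 // 16 == 8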
| 707
|
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 0
|
def upper(word: str) -> str:
    """
    Convert a word to uppercase without using str.upper().

    >>> upper("wow")
    'WOW'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 708
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
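# A minimal CLI sketch (illustrative): this module normally runs through the
# `accelerate` entry point rather than being invoked directly.
#
#     $ accelerate config --config_file ~/my_accelerate_config.yaml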
| 649
| 0
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    '''simple docstring'''

    def __init__(self):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    '''simple docstring'''

    def test_memory_implicit(self):
        """simple docstring"""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        """simple docstring"""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        """simple docstring"""
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
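# A minimal usage sketch (illustrative; `dataset` and the training step are
# assumed to exist): the decorator halves the batch size on every
# out-of-memory error until a run succeeds.
#
#     @find_executable_batch_size(starting_batch_size=256)
#     def train(batch_size):
#         loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
#         for batch in loader:
#             ...  # forward/backward pass
#
#     train()  # called without arguments; the decorator injects `batch_size`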
| 709
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
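# A small worked example (illustrative): for the classic "purple" instance the
# table holds both decompositions at index len(target).
#
#     all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
#     # -> [['purp', 'le'], ['p', 'ur', 'p', 'le']]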
| 649
| 0
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: "ModelArguments", training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    '''simple docstring'''

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # default to no attention mask so the name is always bound below
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
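# A minimal launch sketch (illustrative; the script name, dataset, and model
# are examples, and `--output_dir` comes from the shared TrainingArguments):
#
#     $ python run_pretrain.py \
#         --model_name_or_path facebook/wav2vec2-large-lv60 \
#         --dataset_name librispeech_asr \
#         --dataset_config_name clean \
#         --output_dir ./wav2vec2-pretrained \
#         --max_duration_in_seconds 20.0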
| 710
|
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 649
| 0
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    '''simple docstring'''

    def _check_no_duplicates_on_constructed_node(self, node):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        """simple docstring"""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    '''simple docstring'''

    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """simple docstring"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """simple docstring"""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """simple docstring"""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """simple docstring"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
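# A minimal README sketch (illustrative) of the front matter this validator
# parses: a leading `---` delimited YAML block followed by the markdown body.
#
#     ---
#     pretty_name: My Dataset
#     train-eval-index: []
#     ---
#     # Dataset Card for My Dataset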
| 711
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
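# Usage sketch (illustrative): attribute access on the lazy module triggers the
# real import, so the heavy torch/TF code paths load only when first needed.
#
#     from transformers.models.funnel import FunnelTokenizer  # resolved lazily
#     tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")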
| 649
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        """simple docstring"""
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        """simple docstring"""
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        """simple docstring"""
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.default_feature_extractor
__lowercase : Any = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(__a )
__lowercase : Dict = self.default_feature_extractor
__lowercase , __lowercase : Tuple = prepare_audio()
__lowercase : int = audio.squeeze().numpy()
__lowercase : Optional[int] = feature_extractor(__a , sampling_rate=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : int = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : Union[str, Any] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 712
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file descriptor is only not None while the object currently
        # holds the lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
                lock_id = id(self )
                lock_filename = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
                self._lock_counter = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
        self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
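# A minimal usage sketch for the file-lock API above (hedged: it assumes the canonical
# `FileLock` and `Timeout` names advertised in `__all__`).
if __name__ == "__main__":
    lock = FileLock("""hello.txt.lock""" , timeout=1 )
    try:
        with lock:  # acquire() on __enter__, release() on __exit__
            with open("""hello.txt""" , """a""" ) as fh:
                fh.write("""guarded write\n""" )
    except Timeout:
        print("""Another process currently holds the lock.""" )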
| 649
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = tempfile.mkdtemp()
# fmt: off
__lowercase : Tuple = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowercase : Optional[Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
__lowercase : str = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__lowercase : Any = {"""unk_token""": """<unk>"""}
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
__lowercase : str = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
__lowercase : Tuple = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase ( self : Optional[int] , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCAmelCase ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        __lowercase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__lowercase : Tuple = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Optional[Any] = self.get_rust_tokenizer()
__lowercase : str = self.get_image_processor()
__lowercase : Optional[int] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
__lowercase : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase_ )
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
__lowercase : Optional[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.get_image_processor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
__lowercase : Optional[int] = self.prepare_image_inputs()
__lowercase : Any = image_processor(lowerCamelCase_ , return_tensors="""np""" )
__lowercase : Union[str, Any] = processor(images=lowerCamelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.get_image_processor()
__lowercase : Any = self.get_tokenizer()
__lowercase : int = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
__lowercase : Any = """lower newer"""
__lowercase : Dict = processor(text=lowerCamelCase_ )
__lowercase : Optional[int] = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_image_processor()
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Tuple = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : Dict = self.prepare_image_inputs()
__lowercase : Dict = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase : Dict = self.get_image_processor()
__lowercase : Tuple = self.get_tokenizer()
__lowercase : List[str] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
__lowercase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : Optional[Any] = processor.batch_decode(lowerCamelCase_ )
__lowercase : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Tuple = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Optional[int] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
__lowercase : Tuple = """lower newer"""
__lowercase : Any = self.prepare_image_inputs()
__lowercase : int = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 713
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
        __lowercase : int = max_2d_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
        __lowercase : str = rel_2d_pos_bins
        __lowercase : List[Any] = max_rel_2d_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
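# A minimal export sketch for the ONNX config above (hedged: it assumes the canonical
# `LayoutLMv3Processor` / `LayoutLMv3OnnxConfig` names and an already-loaded model
# `config`; an illustration, not the library's documented recipe):
#
#     from transformers import LayoutLMv3Processor
#     from transformers.utils import TensorType
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)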
| 649
| 0
|
def combination_sum_iv(n: int , array: list[int] , target: int ):
def count_of_possible_combinations(lowerCAmelCase_ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array(n: int , array: list[int] , target: int ):
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int , array: list[int] , target: int ):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
for i in range(1 , target + 1 ):
        for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
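# Worked trace of the bottom-up DP above with array=[1, 2, 5] and target=5 (the same
# values as the __main__ example below): dp_array[0]=1 (the empty sum), dp_array[1]=1,
# dp_array[2]=2, dp_array[3]=3, dp_array[4]=5, and
# dp_array[5] = dp_array[4] + dp_array[3] + dp_array[0] = 9 ordered combinations.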
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 714
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
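# A minimal usage sketch for the conversational pipeline above (hedged: it goes through
# the public `transformers` entry points, with a conversational checkpoint by way of
# example).
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    chatbot = pipeline("""conversational""" , model="""microsoft/DialoGPT-medium""" )
    conversation = Conversation("""What is the best way to learn Python?""" )
    conversation = chatbot(conversation )
    print(conversation.generated_responses[-1] )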
| 649
| 0
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCamelCase : Any = get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any]=0 ):
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowercase : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowercase : str = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
__lowercase : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if accelerator.process_index == 0:
logger.info(F"Saving model to {output_model_file}" )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowercase : Dict = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
__lowercase : Tuple = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Saving model to {output_model_file}" )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowercase : Optional[Any] = os.path.join(lowerCamelCase__ , F"{MODEL_NAME}_{model_index}" )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
logger.info(F"Saving model to {ckpt_dir}" )
__lowercase : Optional[Any] = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=lowerCamelCase__ , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase__ ) , planner=DefaultSavePlanner() , )
logger.info(F"Model saved to {ckpt_dir}" )
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(lowerCamelCase__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowercase : Union[str, Any] = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
__lowercase : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Loading model from {input_model_file}" )
__lowercase : List[str] = torch.load(lowerCamelCase__ )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowercase : Any = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
__lowercase : List[str] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Loading model from {input_model_file}" )
__lowercase : Union[str, Any] = torch.load(lowerCamelCase__ )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowercase : Optional[int] = (
os.path.join(lowerCamelCase__ , F"{MODEL_NAME}_{model_index}" )
if F"{MODEL_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading model from {ckpt_dir}" )
__lowercase : int = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=lowerCamelCase__ , storage_reader=dist_cp.FileSystemReader(lowerCamelCase__ ) , planner=DefaultLoadPlanner() , )
__lowercase : Tuple = state_dict["model"]
logger.info(F"Model loaded from {ckpt_dir}" )
model.load_state_dict(lowerCamelCase__ )
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int=0 ):
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowercase : Dict = FSDP.optim_state_dict(lowerCamelCase__ , lowerCamelCase__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowercase : Optional[Any] = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
__lowercase : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Saving Optimizer state to {output_optimizer_file}" )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Optimizer state saved in {output_optimizer_file}" )
else:
__lowercase : str = os.path.join(lowerCamelCase__ , F"{OPTIMIZER_NAME}_{optimizer_index}" )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
logger.info(F"Saving Optimizer state to {ckpt_dir}" )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase__ ) , planner=DefaultSavePlanner() , )
logger.info(F"Optimizer state saved in {ckpt_dir}" )
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowercase : List[str] = None
            # The check below should work but currently doesn't (mostly a PyTorch issue);
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowercase : Any = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
__lowercase : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"Loading Optimizer state from {input_optimizer_file}" )
__lowercase : int = torch.load(lowerCamelCase__ )
logger.info(F"Optimizer state loaded from {input_optimizer_file}" )
else:
__lowercase : List[Any] = (
os.path.join(lowerCamelCase__ , F"{OPTIMIZER_NAME}_{optimizer_index}" )
if F"{OPTIMIZER_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading Optimizer from {ckpt_dir}" )
__lowercase : Optional[Any] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(lowerCamelCase__ ) , )
__lowercase : Dict = optim_state["optimizer"]
logger.info(F"Optimizer loaded from {ckpt_dir}" )
__lowercase : Optional[Any] = FSDP.optim_state_dict_to_load(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
optimizer.load_state_dict(lowerCamelCase__ )
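# A minimal calling sketch for the checkpoint helpers above (hedged: Accelerate exposes
# them as `save_fsdp_model` / `load_fsdp_model`, and a prepared `accelerator`, `model`,
# and `fsdp_plugin` are assumed to already exist):
#
#     fsdp_plugin = accelerator.state.fsdp_plugin
#     save_fsdp_model(fsdp_plugin, accelerator, model, "checkpoints/step_100")
#     load_fsdp_model(fsdp_plugin, accelerator, model, "checkpoints/step_100")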
| 715
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
| 649
| 0
|
import sys
import turtle
def get_mid(pa , pb ):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2  # midpoint of two points
def triangle(vertexa , vertexb , vertexc , depth , ):
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
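# Each call draws one outline and then recurses into three half-scale copies, so a run
# at depth d draws 3**0 + 3**1 + ... + 3**d = (3 ** (d + 1) - 1) // 2 triangles total.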
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 716
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # Render the bar only on the local main process; disable it on all others.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
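# A minimal usage sketch (hedged: this wrapper is re-exported as `accelerate.utils.tqdm`;
# note that the first positional argument is `main_process_only`, not the iterable):
#
#     from accelerate.utils import tqdm
#
#     for batch in tqdm(True, range(10)):
#         ...  # the bar renders only on the local main process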
| 649
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowerCamelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : Union[str, Any] = 2_56
class lowerCAmelCase ( __lowerCAmelCase ):
'''simple docstring'''
_A : List[Any] = ['''melgan''']
def __init__( self : Dict , __a : str , __a : Optional[int] , __a : str , __a : str , __a : int , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
__lowercase : Dict = math.log(1E-5 ) # Matches MelGAN training.
__lowercase : Tuple = 4.0 # Largest value for most examples
__lowercase : Optional[int] = 128
self.register_modules(
notes_encoder=lowerCAmelCase_ , continuous_encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , melgan=lowerCAmelCase_ , )
def lowerCAmelCase ( self : Union[str, Any] , __a : int , __a : List[str]=(-1.0, 1.0) , __a : int=False ) -> str:
"""simple docstring"""
__lowercase , __lowercase : Dict = output_range
if clip:
__lowercase : Tuple = torch.clip(lowerCAmelCase_ , self.min_value , self.max_value )
# Scale to [0, 1].
__lowercase : str = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowerCAmelCase ( self : List[Any] , __a : Optional[Any] , __a : Optional[Any]=(-1.0, 1.0) , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Union[str, Any] = input_range
__lowercase : Any = torch.clip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if clip else outputs
# Scale to [0, 1].
__lowercase : int = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
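
# Usage sketch (illustrative; the checkpoint id assumes the public
# music-spectrogram-diffusion weights, and `input_tokens` is expected to come
# from a MIDI tokenizer such as the MidiProcessor that ships with diffusers):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
#   audio = output.audios[0]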
| 717
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
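
# The update above is the classic "house robber" recurrence kept in O(1) space:
#   include(i) = exclude(i - 1) + nums[i]
#   exclude(i) = max(include(i - 1), exclude(i - 1))
# e.g. nums = [1, 5, 3, 7] walks through (include, exclude):
# (1, 0) -> (5, 1) -> (4, 5) -> (12, 5), so the answer is 12 (5 + 7).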
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
| 0
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so model download time is excluded from the next test."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
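
# Both tests share one pattern: read the example bash launcher, strip the shell
# plumbing, substitute the $VAR placeholders, then hand the result to argparse
# through sys.argv. A standalone sketch of that step (the script text and the
# substituted values below are made up for illustration):
#
#   bash_script = 'finetune.py --data_dir $ENRO_DIR --train_batch_size $BS \\\n "$@"'
#   cli = bash_script.split("finetune.py")[1].replace("\\\n", "").replace('"$@"', "").strip()
#   for k, v in {"$ENRO_DIR": "/tmp/enro", "$BS": 16}.items():
#       cli = cli.replace(k, str(v))
#   argv = ["finetune.py"] + cli.split()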
| 718
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
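
# The guard pattern used throughout this file, as a standalone sketch
# (the names here are illustrative):
#
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       from .utils.dummy_pt_objects import *  # noqa F403 -- placeholders that raise on use
#   else:
#       from .models import UNet2DModel  # the real import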
| 649
| 0
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ) -> None:
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
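
# Minimal instantiation sketch (the hyperparameters below are made up for
# illustration; real checkpoints define them in the pipeline config):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (1, 2048))
#   encoded, mask = encoder(tokens, tokens > 0)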
| 719
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We verify the converted model on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
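
# Example invocation (the script filename and output path are illustrative;
# the URL is the default checkpoint defined above):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base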
| 649
| 0
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices.

    >>> input_G = {"A": ["B"], "B": ["A", "C"], "C": ["B"]}
    >>> sorted(depth_first_search(input_G, "A"))
    ['A', 'B', 'C']
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(depth_first_search(G, "A"))
| 720
|
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head mapping hidden states to class logits."""

    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
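
# Usage sketch (illustrative shapes):
#
#   import torch
#   head = ClassificationHead(class_size=2, embed_size=768)
#   logits = head(torch.randn(4, 768))  # -> shape (4, 2)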
| 649
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    # Images from the fixtures dataset used across the vision tests.
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 721
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
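
# Example invocation (the script and file names are illustrative):
#
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge.json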
| 649
| 0
|
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication using the radix-2 fast Fourier Transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the DFTs of A and B and recover A*B via the inverse DFT
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    # Overwrite __str__ for printing; shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
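
# Usage sketch: (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3
#
#   fft = FFT([1, 2, 3], [4, 5])
#   print(fft.product)  # coefficients of A*B, lowest degree first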
| 700
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
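
# Example invocation (assuming the standard `transformers-cli` entry point):
#
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models --force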
| 649
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
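
# The module replaces itself in sys.modules with a _LazyModule so heavy
# submodules are only imported on first attribute access. A simplified sketch
# of the idea (the real _LazyModule also integrates with TYPE_CHECKING):
#
#   import importlib
#
#   class LazyNamespace:
#       def __init__(self, name):
#           self._name = name
#       def __getattr__(self, item):
#           return getattr(importlib.import_module(self._name), item)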
| 701
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions( self ):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self ):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1E-4))
    def test_inference_head( self ):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1E-4))
    def test_seq_to_seq_generation( self ):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1E-1))
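# Sketch of how the checkpoint exercised above can be used outside the test
# harness (hedged: mirrors the integration tests; requires network access):
#
#   model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#   batch = prepare_batch("val-batch.pt")
#   outputs = model.generate(
#       past_values=batch["past_values"],
#       past_time_features=batch["past_time_features"],
#       past_observed_mask=batch["past_observed_mask"],
#       static_categorical_features=batch["static_categorical_features"],
#       future_time_features=batch["future_time_features"],
#   )
#   point_forecast = outputs.sequences.mean(dim=1)  # average over parallel samples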
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset ( Dataset ):
    """Abstracts the CNN/DailyMail dataset; yields (name, story lines, summary lines) tuples."""
    def __init__( self , path : str = "" , prefix : str = "train" ) -> None:
        """Lists the documents to summarize; files are not read into memory because of their size."""
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            story_path = os.path.join(path, story_filename)
            if not os.path.isfile(story_path):
                continue
            self.documents.append(story_path)
    def __len__( self ) -> int:
        """Returns the number of documents."""
        return len(self.documents)
    def __getitem__( self , idx ):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Splits a raw story file into story lines and summary lines (marked by `@highlight`)."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncates `sequence` if it is longer than `block_size`, otherwise pads it with `pad_token_id`."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
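# For example (illustrative, not part of the original module):
#   fit_to_block_size([1, 2, 3], 5, 0)          -> [1, 2, 3, 0, 0]
#   fit_to_block_size([1, 2, 3, 4, 5, 6], 5, 0) -> [1, 2, 3, 4, 5]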
def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encodes story and summary lines and flattens them into token-id sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Alternates 0/1 segment ids, switching at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
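# Quick self-check for the helpers above (added example, not part of the original
# module); uses an in-memory story string so no data files are needed.
if __name__ == "__main__":
    demo_story = "First sentence\nSecond sentence.\n@highlight\nThe summary"
    story, summary = process_story(demo_story)
    assert story == ["First sentence.", "Second sentence."]
    assert summary == ["The summary."]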
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler ( SchedulerMixin , ConfigMixin ):
    """A variance-preserving (VP) SDE scheduler for score-based generative modeling."""
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps : int = 2000 , beta_min : float = 0.1 , beta_max : float = 20 , sampling_eps : float = 1E-3 ) -> None:
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ) -> None:
        """Sets the continuous timesteps used for the diffusion chain, from 1 down to `sampling_eps`."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        """Predicts the sample at the previous timestep by reversing the SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
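# Reverse-SDE sampling sketch for ScoreSdeVpScheduler above (`score_model` is a
# hypothetical trained score network; this loop is illustrative, not part of the module):
#
#   scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)
#   sample = x_mean  # the noise-free mean is typically taken as the final sample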
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
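# Checkpoint round-trip sketch (hedged: assumes a process group launched via
# `accelerate launch` and an `accelerator` configured with an FSDP plugin;
# added for illustration only, not runnable stand-alone):
#
#   plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(plugin, accelerator, model, "checkpoint_dir")
#   save_fsdp_optimizer(plugin, accelerator, optimizer, model, "checkpoint_dir")
#   ...
#   load_fsdp_model(plugin, accelerator, model, "checkpoint_dir")
#   load_fsdp_optimizer(plugin, accelerator, optimizer, model, "checkpoint_dir")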
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_encode_with_special_tokens( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs( self ):
        pass
    def test_embeded_special_tokens( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args( self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
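# Illustration of the offset behaviour exercised above for the input
# "hello hello" (values mirror the assertions; added for clarity only):
#   trim_offsets=True  -> second token spans (6, 11): the leading space is trimmed
#   trim_offsets=False -> second token spans (5, 11): the leading space stays in the span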
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
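# Example usage of the skip decorators above (hypothetical test class, added for
# illustration only):
#
#   class ExampleTest(unittest.TestCase):
#       @require_cuda
#       def test_needs_gpu(self):
#           ...
#
#       @slow
#       def test_long_running(self):
#           ...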
class TempDirTestCase ( unittest.TestCase ):
    """A TestCase class that keeps a single temporary directory open for the lifetime of the class
    and wipes its contents at the start of every test."""
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass( cls ):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp( self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase ( unittest.TestCase ):
    """A TestCase class that resets the accelerator state singletons after every test."""
    def tearDown( self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase ( unittest.TestCase ):
    """A TestCase class that registers mocks to be used in every test and cleans them up automatically."""
    def add_mocks( self , mocks ):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Runs `command` with `subprocess.check_output`, optionally returning its decoded stdout, and
    raises a `SubprocessCallException` with the captured error output if the command fails."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained( self ):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
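# Minimal inference sketch mirroring the integration test above (requires
# network access to download the checkpoint; added for illustration only):
#
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   hidden_states = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 768)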
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[str] , __a : List[Any]=100 , __a : Any=13 , __a : Dict=30 , __a : Optional[int]=2 , __a : str=3 , __a : Tuple=True , __a : str=True , __a : Union[str, Any]=32 , __a : int=5 , __a : List[Any]=4 , __a : Optional[Any]=37 , __a : Any="gelu" , __a : List[str]=0.1 , __a : int=0.1 , __a : Tuple=10 , __a : int=0.02 , __a : Tuple=3 , ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[Any] = parent
__lowercase : Union[str, Any] = vocab_size
__lowercase : str = batch_size
__lowercase : Optional[Any] = image_size
__lowercase : Dict = patch_size
__lowercase : Any = num_channels
__lowercase : List[Any] = is_training
__lowercase : Tuple = use_labels
__lowercase : Optional[Any] = hidden_size
__lowercase : str = num_hidden_layers
__lowercase : Optional[Any] = num_attention_heads
__lowercase : Union[str, Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : Optional[int] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Optional[Any] = type_sequence_label_size
__lowercase : Any = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Dict = (image_size // patch_size) ** 2
__lowercase : Any = num_patches + 1
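        # worked example with this tester's defaults: image_size=30 and patch_size=2
        # give num_patches = (30 // 2) ** 2 = 225, so seq_length = 225 + 1 = 226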
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : str = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowerCAmelCase ( self : Optional[int] , __a : str , __a : str , __a : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = FlaxBeitModel(config=A_ )
__lowercase : str = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : List[str] , __a : Any , __a : Any , __a : str ) -> str:
"""simple docstring"""
__lowercase : str = FlaxBeitForMaskedImageModeling(config=A_ )
__lowercase : List[str] = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCAmelCase ( self : str , __a : List[Any] , __a : Optional[int] , __a : List[str] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.type_sequence_label_size
__lowercase : Dict = FlaxBeitForImageClassification(config=A_ )
__lowercase : Union[str, Any] = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase : Optional[int] = 1
__lowercase : List[Any] = FlaxBeitForImageClassification(A_ )
__lowercase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase : Optional[Any] = model(A_ )
def lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowerCAmelCase ( self : List[Any] ) -> None:
"""simple docstring"""
__lowercase : Tuple = FlaxBeitModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Tuple = model_class(A_ )
__lowercase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Tuple = [*signature.parameters.keys()]
__lowercase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A_ )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase , __lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase : List[str] = self._prepare_for_class(A_ , A_ )
__lowercase : Union[str, Any] = model_class(A_ )
@jax.jit
def model_jitted(__a : Union[str, Any] , **__a : Union[str, Any] ):
return model(pixel_values=A_ , **A_ )
with self.subTest("""JIT Enabled""" ):
__lowercase : Any = model_jitted(**A_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase : Optional[Any] = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
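                # added note: jit compilation must not change results, so the traced
                # and eagerly executed outputs are compared output-by-output via shape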
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase : Optional[Any] = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
__lowercase : str = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(A_ )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
__lowercase : List[Any] = self.default_image_processor
__lowercase : List[Any] = prepare_img()
__lowercase : List[str] = image_processor(images=A_ , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
        __lowercase : List[str] = np.ones((1, 196) , dtype=bool )
# forward pass
__lowercase : Tuple = model(pixel_values=A_ , bool_masked_pos=A_ )
__lowercase : int = outputs.logits
# verify the logits
__lowercase : Tuple = (1, 196, 8192)
self.assertEqual(logits.shape , A_ )
__lowercase : Tuple = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1E-2 ) )
@slow
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Dict = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
__lowercase : Optional[Any] = self.default_image_processor
__lowercase : Union[str, Any] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=A_ , return_tensors="""np""" )
# forward pass
__lowercase : Dict = model(**A_ )
__lowercase : List[str] = outputs.logits
# verify the logits
__lowercase : Dict = (1, 1000)
self.assertEqual(logits.shape , A_ )
__lowercase : int = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , A_ , atol=1E-4 ) )
__lowercase : Dict = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : Tuple = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
__lowercase : Any = self.default_image_processor
__lowercase : Any = prepare_img()
__lowercase : int = image_processor(images=A_ , return_tensors="""np""" )
# forward pass
__lowercase : Any = model(**A_ )
__lowercase : List[Any] = outputs.logits
# verify the logits
__lowercase : Optional[Any] = (1, 21841)
self.assertEqual(logits.shape , A_ )
__lowercase : int = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , A_ , atol=1E-4 ) )
__lowercase : Dict = 2396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
| 705
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
| 649
| 0
|
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCamelCase : Optional[int] = 'src/transformers'
# Matches is_xxx_available()
lowerCamelCase : Dict = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCamelCase : Optional[Any] = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase : int = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCamelCase : Tuple = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase : Optional[int] = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase : Any = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase : Union[str, Any] = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase : Union[str, Any] = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCamelCase : Any = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCamelCase : Any = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowerCamelCase : List[str] = re.compile(r'''^\s*else:''')
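# Hedged, self-contained sketch (added example; the _demo_* names are illustrative,
# not from this file): how the guard regex and the backend regex cooperate to turn
# an `if not is_xxx_available()` line into a sorted "_and_"-joined backend key.
import re as _demo_re
_demo_backend = _demo_re.compile(r"is_([a-z_]*)_available()")
_demo_guard = _demo_re.compile(r"^\s*if\s+not\s+is_[a-z_]*_available\(\)")
_demo_line = "if not is_torch_available() and not is_tf_available():"
if _demo_guard.search(_demo_line) is not None:
    _demo_names = sorted(b[0] for b in _demo_backend.findall(_demo_line))
    assert "_and_".join(_demo_names) == "tf_and_torch"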
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
if _re_test_backend.search(_lowerCamelCase ) is None:
return None
__lowercase : str = [b[0] for b in _re_backend.findall(_lowerCamelCase )]
backends.sort()
return "_and_".join(_lowerCamelCase )
def snake_case_ ( lowerCAmelCase_ : int ):
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowercase : List[Any] = f.readlines()
__lowercase : Any = 0
while line_index < len(_lowerCamelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_lowerCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase : Tuple = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
__lowercase : List[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_lowerCamelCase ):
__lowercase : List[Any] = _re_one_line_import_struct.search(_lowerCamelCase ).groups()[0]
            __lowercase : int = re.findall(r"""\[([^\]]+)\]""" , _lowerCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
__lowercase : Any = _re_import_struct_key_value.search(_lowerCamelCase )
if single_line_import_search is not None:
__lowercase : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
__lowercase : int = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
__lowercase : Any = lines[line_index]
if _re_import_struct_add_one.search(_lowerCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_lowerCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_lowerCamelCase ) is not None:
__lowercase : Tuple = _re_import_struct_add_many.search(_lowerCamelCase ).groups()[0].split(""", """ )
__lowercase : Tuple = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif _re_between_brackets.search(_lowerCamelCase ) is not None:
__lowercase : int = _re_between_brackets.search(_lowerCamelCase ).groups()[0].split(""", """ )
__lowercase : List[Any] = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif _re_quote_object.search(_lowerCamelCase ) is not None:
objects.append(_re_quote_object.search(_lowerCamelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
__lowercase : List[str] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase : str = []
while (
line_index < len(_lowerCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
__lowercase : Union[str, Any] = lines[line_index]
__lowercase : List[str] = _re_import.search(_lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase : List[Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(_lowerCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
__lowercase : Optional[Any] = lines[line_index]
__lowercase : Dict = _re_import.search(_lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase : Any = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ):
def find_duplicates(lowerCAmelCase_ : Optional[Any] ):
return [k for k, v in collections.Counter(_lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase : Union[str, Any] = []
for key in import_dict_objects.keys():
__lowercase : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
__lowercase : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase : Optional[int] = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
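# Illustrative input/output for the checker above (hypothetical data, added comment):
#   import_dict_objects = {"none": ["FooConfig"], "torch": ["FooModel"]}
#   type_hint_objects   = {"none": ["FooConfig"], "torch": ["FooModel", "FooPreTrainedModel"]}
# would produce:
#   ["Differences for torch backend:",
#    "  FooPreTrainedModel in TYPE_HINT but not in _import_structure."]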
def snake_case_ ( ):
__lowercase : int = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
__lowercase : Optional[int] = os.path.join(_lowerCamelCase , """__init__.py""" )
__lowercase : Optional[int] = parse_init(_lowerCamelCase )
if objects is not None:
__lowercase : Tuple = analyze_results(*_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__lowercase : Dict = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("""\n""".join(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
raise ValueError("""\n\n""".join(_lowerCamelCase ) )
def snake_case_ ( ):
__lowercase : Optional[int] = []
for path, directories, files in os.walk(_lowerCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(_lowerCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_lowerCamelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
__lowercase : Optional[int] = str((Path(_lowerCamelCase ) / folder).relative_to(_lowerCamelCase ) )
__lowercase : str = short_path.replace(os.path.sep , """.""" )
submodules.append(_lowerCamelCase )
for fname in files:
if fname == "__init__.py":
continue
__lowercase : Optional[int] = str((Path(_lowerCamelCase ) / fname).relative_to(_lowerCamelCase ) )
__lowercase : str = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(_lowerCamelCase )
return submodules
lowerCamelCase : Any = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def snake_case_ ( ):
__lowercase : List[Any] = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(_lowerCamelCase , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase : Optional[int] = spec.loader.load_module()
__lowercase : Tuple = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_lowerCamelCase ) > 0:
__lowercase : str = "\n".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
F"{list_of_modules}\n"
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 706
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
        # test that only decoder-relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
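        # illustrative arithmetic (assumed wav2vec2-base values): with an
        # inputs_to_logits_ratio of 320 and a 16 kHz sampling rate,
        # time_offset = 320 / 16000 = 0.02 seconds per logit frame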
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
| 649
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Union[str, Any] = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
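# Added note: with this lazy pattern, an import such as
# `from transformers import EfficientFormerConfig` resolves through _LazyModule, so
# the heavy modeling and image-processing files are only imported on first access.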
| 707
|
def and_gate ( input_a : int , input_b : int ):
    return int((input_a, input_b).count(0 ) == 0 )
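# Worked example of the expression above: AND is 1 only when no input is 0,
# e.g. (1, 1).count(0) == 0 -> int(True) == 1, while (1, 0).count(0) == 1 -> 0.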
def test_and_gate ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 0
|
from typing import List
from .keymap import KEYMAP, get_character
def snake_case_ ( lowerCAmelCase_ : str ):
def decorator(lowerCAmelCase_ : Dict ):
__lowercase : Optional[Any] = getattr(lowerCAmelCase_ , """handle_key""" , [] )
handle += [key]
setattr(lowerCAmelCase_ , """handle_key""" , lowerCAmelCase_ )
return func
return decorator
def snake_case_ ( *lowerCAmelCase_ : List[str] ):
def decorator(lowerCAmelCase_ : int ):
__lowercase : str = getattr(lowerCAmelCase_ , """handle_key""" , [] )
handle += keys
setattr(lowerCAmelCase_ , """handle_key""" , lowerCAmelCase_ )
return func
return decorator
class lowerCAmelCase ( A_ ):
'''simple docstring'''
def __new__( cls : Any , __a : List[str] , __a : List[Any] , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = super().__new__(cls , __a , __a , __a )
if not hasattr(__a , """key_handler""" ):
setattr(__a , """key_handler""" , {} )
setattr(__a , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__lowercase : int = getattr(__a , """handle_key""" , [] )
for key in handled_keys:
__lowercase : Dict = value
return new_cls
@staticmethod
def lowerCAmelCase ( cls : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : int = get_character()
if char != KEYMAP["undefined"]:
__lowercase : List[str] = ord(__a )
__lowercase : Dict = cls.key_handler.get(__a )
if handler:
__lowercase : Union[str, Any] = char
return handler(cls )
else:
return None
def snake_case_ ( cls : List[str] ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
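# Hedged usage sketch (added; the decorator and method names follow the upstream
# accelerate API and are assumptions here): a class built on this metaclass
# registers per-key handlers through the decorators above, e.g.
#   class Menu(metaclass=KeyHandler):
#       @mark("q")
#       def quit(self):
#           ...
# after which Menu.handle_input() dispatches the pressed key via cls.key_handler.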
| 708
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase : int = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def snake_case_ ( ):
__lowercase : List[Any] = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase : Union[str, Any] = get_sagemaker_input()
else:
__lowercase : str = get_cluster_input()
return config
def snake_case_ ( lowerCAmelCase_ : List[str]=None ):
if subparsers is not None:
__lowercase : Optional[int] = subparsers.add_parser("""config""" , description=lowerCAmelCase_ )
else:
__lowercase : List[str] = argparse.ArgumentParser("""Accelerate config command""" , description=lowerCAmelCase_ )
parser.add_argument(
"""--config_file""" , default=lowerCAmelCase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def snake_case_ ( lowerCAmelCase_ : Tuple ):
__lowercase : Union[str, Any] = get_user_input()
if args.config_file is not None:
__lowercase : List[Any] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
__lowercase : Any = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(lowerCAmelCase_ )
else:
config.to_yaml_file(lowerCAmelCase_ )
print(F"accelerate configuration saved at {config_file}" )
def snake_case_ ( ):
__lowercase : str = config_command_parser()
__lowercase : str = parser.parse_args()
config_command(lowerCAmelCase_ )
if __name__ == "__main__":
main()
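# Typical invocation (illustrative): `accelerate config --config_file path.yaml`
# walks through the prompts above and saves the answers to the given file; a
# `.json` suffix makes the config serialize as JSON instead of YAML.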
| 649
| 0
|
def snake_case_ ( a : str , b : str ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
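# Worked example (added): snake_case_("daBcd", "ABC") is True, since the lowercase
# "d" can be dropped, "a" capitalized, "B" kept, "c" capitalized and the final "d" dropped.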
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
from __future__ import annotations
def all_construct ( target : str , word_bank : list[str] | None = None ):
    word_bank = word_bank or []
    # create a table
    table_size : int = len(target ) + 1
    table : list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations : list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
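# Worked example (added): all_construct("abc", ["a", "b", "c", "ab"]) returns
# [['ab', 'c'], ['a', 'b', 'c']], i.e. every ordering of bank words that
# concatenates to the target.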
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 649
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase : List[str] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Dict=None , ):
if attention_mask is None:
__lowercase : Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__lowercase : str = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__lowercase : int = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : int , __a : List[Any]=13 , __a : int=7 , __a : Dict=True , __a : List[str]=False , __a : Tuple=99 , __a : str=16 , __a : Optional[Any]=2 , __a : int=4 , __a : Dict=4 , __a : Optional[Any]="gelu" , __a : List[str]=0.1 , __a : Optional[Any]=0.1 , __a : Tuple=32 , __a : List[Any]=2 , __a : Tuple=1 , __a : Optional[int]=0 , __a : Dict=0.02 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = parent
__lowercase : int = batch_size
__lowercase : Dict = seq_length
__lowercase : Dict = is_training
__lowercase : List[Any] = use_labels
__lowercase : str = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : Optional[Any] = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : str = intermediate_size
__lowercase : Union[str, Any] = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : List[Any] = max_position_embeddings
__lowercase : Dict = eos_token_id
__lowercase : Any = pad_token_id
__lowercase : Dict = bos_token_id
__lowercase : Optional[Any] = initializer_range
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__lowercase : Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__lowercase : int = shift_tokens_right(lowerCamelCase_ , 1 , 2 )
__lowercase : List[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase_ , )
__lowercase : Optional[int] = prepare_blenderbot_inputs_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return config, inputs_dict
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[int] , __a : str , __a : Optional[Any] , __a : int ) -> List[str]:
"""simple docstring"""
__lowercase : Dict = 20
__lowercase : Dict = model_class_name(lowerCamelCase_ )
__lowercase : Dict = model.encode(inputs_dict["""input_ids"""] )
__lowercase , __lowercase : int = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowercase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
__lowercase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__lowercase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
__lowercase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowercase : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase_ , )
__lowercase : Optional[int] = model.decode(lowerCamelCase_ , lowerCamelCase_ )
__lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def lowerCAmelCase ( self : List[Any] , __a : List[Any] , __a : Tuple , __a : int ) -> List[str]:
"""simple docstring"""
__lowercase : int = 20
__lowercase : Any = model_class_name(lowerCamelCase_ )
__lowercase : Optional[int] = model.encode(inputs_dict["""input_ids"""] )
__lowercase , __lowercase : Union[str, Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowercase : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowercase : Any = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
__lowercase : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
__lowercase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowercase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
__lowercase : int = model.decode(lowerCamelCase_ , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ )
__lowercase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
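    # Shifting drops each row's final token while the decoder start token
    # (id 2 above) takes position 0, which is why exactly one trailing pad
    # token (id 1) disappears in this example.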
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
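# The same tuple-counting trick can express other basic gates; e.g. a
# hypothetical AND gate (not part of this file) would be:
#
#     def and_gate(input_1: int, input_2: int) -> int:
#         return int((input_1, input_2).count(0) == 0)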
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
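# Until one of the exported names is actually accessed, `_LazyModule` defers
# importing the torch-dependent submodules, which keeps the initial
# `import transformers` cheap even when torch is installed.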
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by `BaseFileLock.acquire()`."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
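# A minimal usage sketch (assuming a writable working directory):
#
#     lock = FileLock("resource.txt.lock", timeout=5)
#     with lock:
#         ...  # exclusive access to resource.txt
#
# Acquisitions are counted per lock object, so nested `with` blocks on the
# same lock only release the file when the outermost block exits.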
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale)

    return config
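# A minimal usage sketch; the expected values follow from the branches above
# for a call with the "focalnet-tiny" checkpoint name:
#
#     config = get_focalnet_config("focalnet-tiny")
#     # depths == [2, 2, 6, 2], embed_dim == 96, focal_levels == [2, 2, 2, 2]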
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
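# For example, the checkpoint key "layers.0.blocks.1.modulation.f.weight" is
# rewritten step by step to
# "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight".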
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
if model_name == "focalnet-tiny":
__lowercase : Optional[Any] = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
__lowercase : Dict = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
__lowercase : Any = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
__lowercase : List[str] = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
__lowercase : Any = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
__lowercase : Dict = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if push_to_hub:
print(F"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(F"{model_name}" )
processor.push_to_hub(F"{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
    def atol_for_validation(self) -> float:
        return 1e-5
@property
    def default_onnx_opset(self) -> int:
        return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the `AutoencoderKL` encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215):
"""simple docstring"""
super().__init__()
# pass init params to Encoder
__lowercase : str = Encoder(
in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , )
# pass init params to Decoder
__lowercase : str = Decoder(
in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , norm_num_groups=_A , act_fn=_A , )
__lowercase : List[str] = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__lowercase : Any = nn.Convad(_A , _A , 1 )
__lowercase : str = False
__lowercase : Any = False
# only relevant if vae tiling is enabled
__lowercase : Optional[Any] = self.config.sample_size
__lowercase : Tuple = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__lowercase : Optional[int] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__lowercase : Optional[int] = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
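    # Both blend helpers cross-fade a strip of width `blend_extent` linearly:
    # at offset k the result is
    # (1 - k / blend_extent) * neighbor_tile + (k / blend_extent) * current_tile,
    # which removes visible seams between independently processed tiles.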
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
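# A minimal usage sketch with the default config above (shapes are indicative,
# not guaranteed for every configuration):
#
#     vae = AutoencoderKL()
#     image = torch.randn(1, 3, 32, 32)
#     posterior = vae.encode(image).latent_dist
#     latents = posterior.sample()
#     reconstruction = vae.decode(latents).sample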
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
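# Illustrative usage sketch (not in the original file); the strings below are made up.
#
#   conversation = Conversation("Going to the movies tonight, any suggestions?")
#   conversation.mark_processed()                      # move pending input into history
#   conversation.append_response("The Big Lebowski")
#   print(conversation)                                # "Conversation id: ... user >> ... bot >> ..."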
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''', )
class ConversationalPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """simple docstring"""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        """simple docstring"""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        """simple docstring"""
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """simple docstring"""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """simple docstring"""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        """simple docstring"""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
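# Illustrative usage sketch (not in the original file): assumes the pipeline is registered
# under the "conversational" task; the model name below is an assumption.
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Going to the movies tonight, any suggestions?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])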
| 649
| 0
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
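# Illustrative example (not in the original file): with a 16 kHz signal and max_length=2.0,
# random_subsample returns a window of 16000 * 2.0 = 32000 samples, or the whole array
# if it is already shorter than that.
#
#   wav = np.zeros(100_000, dtype=np.float32)       # dummy 6.25 s clip
#   chunk = random_subsample(wav, max_length=2.0)   # len(chunk) == 32000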
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: Optional[str] = field(default=None, metadata={'''help''': '''Name of a dataset from the datasets package'''})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''})
    train_file: Optional[str] = field(
        default=None, metadata={'''help''': '''A file containing the training audio paths and labels.'''})
    eval_file: Optional[str] = field(
        default=None, metadata={'''help''': '''A file containing the validation audio paths and labels.'''})
    train_split_name: str = field(
        default='''train''', metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        }, )
    eval_split_name: str = field(
        default='''validation''', metadata={
            '''help''': (
                '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        }, )
    audio_column_name: str = field(
        default='''audio''', metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''}, )
    label_column_name: str = field(
        default='''label''', metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        }, )
    max_length_seconds: float = field(
        default=20, metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''}, )
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''', metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}, )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''})
    model_revision: str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Name or path of preprocessor config.'''})
    freeze_feature_encoder: bool = field(
        default=True, metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''})
    attention_mask: bool = field(
        default=True, metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''})
    use_auth_token: bool = field(
        default=False, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''})
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''}, )

    def __post_init__(self):
        """simple docstring"""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder` """
                """instead. Setting `freeze_feature_encoder==True`.""", FutureWarning, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`. """
                """Only make use of `--freeze_feature_encoder`.""")
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. "
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. "
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms (random subsampling) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["""labels"""] = list(batch[data_args.label_column_name])
        return output_batch
    def val_transforms(batch):
        """Apply val_transforms (no subsampling) across a batch."""
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["""labels"""] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["""train"""].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="""audio-classification""", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["""train"""] if training_args.do_train else None, eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""", metrics)
        trainer.save_metrics("""eval""", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 715
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    '''simple docstring'''

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """tf_padding"""))
        self.parent.assertTrue(hasattr(config, """depth_multiplier"""))
class MobileNetVaModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        """simple docstring"""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
    @slow
    def test_inference_semantic_segmentation(self):
        """simple docstring"""
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
| 649
| 0
|
def snake_case_(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""")
    if input_num <= 0:
        raise ValueError("""Input must be positive""")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
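# Illustrative examples (not in the original file): for a perfect number the proper divisors
# sum back to the number itself, e.g. 1 + 2 + 3 == 6 and 1 + 2 + 4 + 7 + 14 == 28.
#
#   snake_case_(6)   # -> 6
#   snake_case_(28)  # -> 28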
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""")
    disable = False
    if main_process_only:
        # Only the local main process should render the progress bar.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
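# Illustrative usage sketch (not in the original file); `dataloader` is a made-up iterable.
#
#   for batch in snake_case_(True, dataloader, desc="training"):
#       ...  # only the local main process renders the bar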
| 649
| 0
|
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursively explore every cell and track the largest square of 1s found so far."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, memoized in `dp_array`."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP: dp_array[row][col] is the side of the largest square with top-left corner (row, col)."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current row and the row below it."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias, so the finished row survives as `next_row` while the next
        # iteration overwrites `current_row`
        next_row = current_row.copy()
    return largest_square_area
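# Worked example (not in the original file): for mat = [[1, 1], [1, 1]] the recurrence
# dp[r][c] = 1 + min(right, diagonal, bottom) yields dp = [[2, 1], [1, 1]] (plus a zero
# border), so the functions return 2, the side length of the largest all-ones square.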
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 717
|
from __future__ import annotations
def snake_case_(nums: list[int]) -> int:
    """Maximum sum of non-adjacent elements of the list."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either extend the "excluding" sum with `num`, or carry forward the best so far.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
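# Illustrative example (not in the original file): for [3, 2, 7, 10] the best choice is
# 3 + 10 = 13 (indices 0 and 3), so snake_case_([3, 2, 7, 10]) returns 13.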
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree(Generic[T]):
    '''simple docstring'''

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """simple docstring"""
        any_type: Any | T = None
        self.N: int = len(arr)
        # self.st[1] is the root; the leaves live at indices N..2N-1.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """simple docstring"""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """simple docstring"""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """simple docstring"""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
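# Note (added for clarity): `query` walks the l/r pointers upward through the tree; an odd
# left index or an even right index marks a node that lies fully inside [l, r] but is not
# covered by its parent, so it is folded into `res` before the pointers move up a level.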
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 718
|
__version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 649
| 0
|
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed) via quickselect with a random pivot."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
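# Illustrative example (not in the original file): elements equal to the pivot are dropped
# by the partition, so this quickselect assumes distinct values.
#
#   kth_number([2, 1, 3, 4, 5], 3)  # -> 3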
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias")
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[F"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(F"{prefix}blocks.{i}.gamma_2")
        state_dict[F"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[F"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 649
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Dict = logging.get_logger(__name__)
class lowerCAmelCase ( BaseImageProcessor ):
'''simple docstring'''
_A : Dict = ['''pixel_values''']
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"""height""": 256, """width""": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return resize(
            image , size=(size["""height"""], size["""width"""]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
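# Minimal usage sketch (hedged: the class keeps this file's obfuscated name, and the
# shapes assume the default 256x256 resize followed by the 224x224 center crop):
#   processor = lowerCAmelCase()
#   batch = processor(images=PIL.Image.new("RGB", (300, 300)), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224), channels-first by default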
| 720
|
from torch import nn
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__( self , class_size , embed_size ) -> None:
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def forward( self , hidden_state ):
        """simple docstring"""
        logits = self.mlp(hidden_state )
        return logits
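# Usage sketch (hedged; the sizes are hypothetical): a single linear probe mapping a
# hidden state of `embed_size` to `class_size` logits.
#   import torch
#   head = lowerCAmelCase(class_size=2, embed_size=768)
#   logits = head(torch.randn(1, 768))  # -> shape (1, 2)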
| 649
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = tempfile.mkdtemp()
__lowercase : int = BlipImageProcessor()
        __lowercase : Union[str, Any] = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__lowercase : Optional[int] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
__lowercase : Any = InstructBlipProcessor(__a , __a , __a )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : List[str] , **__a : str ) -> Dict:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).tokenizer
def lowerCAmelCase ( self : int , **__a : Optional[Any] ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def lowerCAmelCase ( self : Any , **__a : Union[str, Any] ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).qformer_tokenizer
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
        __lowercase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__lowercase : Union[str, Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__lowercase : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase : List[str] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__lowercase : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
self.assertIsInstance(processor.qformer_tokenizer , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.get_image_processor()
__lowercase : Tuple = self.get_tokenizer()
__lowercase : int = self.get_qformer_tokenizer()
__lowercase : Tuple = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__lowercase : Any = self.prepare_image_inputs()
__lowercase : Any = image_processor(__a , return_tensors="""np""" )
__lowercase : Any = processor(images=__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_image_processor()
__lowercase : Tuple = self.get_tokenizer()
__lowercase : List[str] = self.get_qformer_tokenizer()
__lowercase : Tuple = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__lowercase : Dict = """lower newer"""
__lowercase : Union[str, Any] = processor(text=__a )
__lowercase : str = tokenizer(__a , return_token_type_ids=__a )
__lowercase : List[str] = qformer_tokenizer(__a , return_token_type_ids=__a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Dict = self.get_image_processor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[Any] = self.get_qformer_tokenizer()
__lowercase : List[str] = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__lowercase : Tuple = """lower newer"""
__lowercase : Optional[Any] = self.prepare_image_inputs()
__lowercase : List[str] = processor(text=__a , images=__a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Tuple = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Union[str, Any] = self.get_qformer_tokenizer()
__lowercase : List[Any] = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__lowercase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : Optional[int] = processor.batch_decode(__a )
__lowercase : Any = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : int = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Any = self.get_qformer_tokenizer()
__lowercase : int = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__lowercase : List[Any] = """lower newer"""
__lowercase : Optional[int] = self.prepare_image_inputs()
__lowercase : Dict = processor(text=__a , images=__a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
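# Note: the processor fans text out to both sub-tokenizers; the Q-Former tokenizer's
# outputs are merged into the joint encoding under keys prefixed with "qformer_",
# alongside the image processor's "pixel_values", as the assertions above check.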
| 721
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path , tgt_path , save_path=None , **kwargs ):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
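# Example CLI invocation via fire (hedged; the script name and file paths are
# hypothetical). fire exposes the single function directly, so arguments map onto
# its parameters:
#   python rouge_cli.py preds.txt refs.txt --save_path metrics.json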
| 649
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = ReformerTokenizer
_A : int = ReformerTokenizerFast
_A : Any = True
_A : Tuple = False
_A : Union[str, Any] = True
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
super().setUp()
        tokenizer = ReformerTokenizer(lowerCamelCase , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = """<s>"""
__lowercase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__a ) , 1000 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase : Any = self.get_tokenizer()
__lowercase : Union[str, Any] = self.get_rust_tokenizer()
__lowercase : Dict = """I was born in 92000, and this is falsé."""
__lowercase : Any = tokenizer.tokenize(__a )
__lowercase : Any = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Optional[int] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
__lowercase : str = self.get_rust_tokenizer()
__lowercase : Optional[int] = tokenizer.encode(__a )
__lowercase : Optional[int] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : List[str] , __a : Union[str, Any]=15 ) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__lowercase : Optional[Any] = """This is a simple input"""
__lowercase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowercase : List[Any] = ("""This is a simple input""", """This is a pair""")
__lowercase : Any = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="""max_length""" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="""max_length""" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="""max_length""" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="""max_length""" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="""max_length""" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="""max_length""" , )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = ReformerTokenizer(lowerCamelCase , keep_accents=True )
__lowercase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] , )
__lowercase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowercase : Any = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = """Hello World!"""
__lowercase : Optional[int] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__lowercase : int = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
__lowercase : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowercase : int = """ """.join(__a )
__lowercase : int = self.big_tokenizer.encode_plus(__a , return_tensors="""pt""" )
__lowercase : List[Any] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
__lowercase : List[str] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
__lowercase : str = encoded_sequence["""input_ids"""].shape
__lowercase : Optional[Any] = ReformerModel(__a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : int = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
__lowercase : List[str] = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=__a , sequences=__a , )
| 700
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( args ):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser : ArgumentParser ) -> None:
        """simple docstring"""
        download_parser = parser.add_parser("""download""" )
        download_parser.add_argument(
            """--cache-dir""" , type=str , default=None , help="""Path to location to store the models""" )
        download_parser.add_argument(
            """--force""" , action="""store_true""" , help="""Force the model to be downloaded even if it is already in cache-dir""" )
        download_parser.add_argument(
            """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code, as it will execute on your local machine""" , )
        download_parser.add_argument("""model""" , type=str , help="""Name of the model to download""" )
        download_parser.set_defaults(func=snake_case_ )
    def __init__( self , model : str , cache : str , force : bool , trust_remote_code : bool ) -> None:
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ) -> None:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
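# Example CLI usage (hedged: assumes this command is registered under the
# transformers-cli entry point; the model name and cache path are illustrative):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force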
| 649
| 0
|
def circle_sort( collection : list ) -> list:
    if len(collection ) < 2:
        return collection
    def circle_sort_util( collection : list , low : int , high : int ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(circle_sort(unsorted))
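# Example: circle_sort([5, 2, 9, 1]) returns [1, 2, 5, 9]. Each pass compares
# mirrored pairs (first/last, second/second-to-last, ...) on ever-smaller halves,
# and the outer loop repeats until a full pass performs no swap.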
| 701
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
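# Note: prepare_batch pulls a pickled dict of tensors from the Hub dataset repo
# hf-internal-testing/tourism-monthly-batch; it feeds the integration tests below.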
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
| 649
| 0
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
lowerCamelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase ( FeatureExtractionMixin ):
'''simple docstring'''
    def __init__( self , **kwargs ) -> None:
        """simple docstring"""
        requires_backends(self , ["""bs4"""] )
        super().__init__(**kwargs )
    def xpath_soup( self , element ):
        """simple docstring"""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single( self , html_string ):
        """simple docstring"""
        html_code = BeautifulSoup(html_string , """html.parser""" )
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags, xpath_subscripts = self.xpath_soup(element )
                stringaxtag_seq.append(xpath_tags )
                stringaxsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(stringaxtag_seq ):
            raise ValueError("""Number of doc strings and xtags does not correspond""" )
        if len(all_doc_strings ) != len(stringaxsubs_seq ):
            raise ValueError("""Number of doc strings and xsubs does not correspond""" )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subs ):
        """simple docstring"""
        xpath = """"""
        for tagname, subs in zip(xpath_tags , xpath_subs ):
            xpath += F"/{tagname}"
            if subs != 0:
                xpath += F"[{subs}]"
        return xpath
    def __call__( self , html_strings ) -> BatchFeature:
        """simple docstring"""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must be of type `str`, `List[str]` (batch of examples), """
                F"but is of type {type(html_strings )}." )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , stringaxtag_seq , stringaxsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {"""nodes""": nodes, """xpaths""": xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
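# Usage sketch (hedged: requires the bs4 backend; the class keeps this file's
# obfuscated name):
#   fe = lowerCAmelCase()
#   out = fe("<html><body><p>Hello</p></body></html>")
#   out["nodes"], out["xpaths"]  # -> [['Hello']], [['/html/body/p']]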
| 702
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
_A : str = 1
@register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ) -> None:
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device : Union[str, torch.device] = None ) -> None:
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
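# Reverse-time VP-SDE step, for reference: the raw model output is first rescaled to a
# score estimate (score = -model_output / std), then with
#   beta(t)   = beta_min + t * (beta_max - beta_min)
#   drift     = -0.5 * beta(t) * x - beta(t) * score
#   diffusion = sqrt(beta(t))
# the update reads x_mean = x + drift * dt and x = x_mean + diffusion * sqrt(-dt) * noise,
# where dt = -1 / num_inference_steps is negative because sampling runs backward in time.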
| 649
| 0
|
lowerCamelCase : List[str] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ):
    # Return True if there is an augmenting path from source `s` to sink `t`.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph , source , sink ):
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
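# For the matrix above (the classic CLRS flow network), mincut(test_graph, source=0,
# sink=5) pushes a max flow of 23 and returns the saturated edges
# [(1, 3), (4, 3), (4, 5)], whose capacities 12 + 7 + 4 give the minimum cut value.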
| 703
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
| 649
| 0
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """width_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : List[Any] , __a : Tuple=13 , __a : List[str]=64 , __a : List[str]=2 , __a : str=3 , __a : Union[str, Any]="swish" , __a : List[str]=3 , __a : str=32 , __a : List[str]=0.1 , __a : List[str]=0.02 , __a : Any=True , __a : Optional[Any]=True , __a : List[Any]=10 , __a : Any=None , __a : Optional[Any]=0.25 , __a : Optional[int]=0.0 , __a : Dict=0.0 , ) -> List[str]:
"""simple docstring"""
__lowercase : int = parent
__lowercase : Union[str, Any] = batch_size
__lowercase : str = image_size
__lowercase : Optional[Any] = patch_size
__lowercase : Any = num_channels
__lowercase : int = make_divisible(512 * width_multiplier , divisor=8 )
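# make_divisible rounds 512 * width_multiplier to a nearby multiple of 8 so the
# hidden channel count stays divisible by a hardware-friendly factor.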
__lowercase : Union[str, Any] = hidden_act
__lowercase : Optional[Any] = conv_kernel_size
__lowercase : Optional[int] = output_stride
__lowercase : List[Any] = classifier_dropout_prob
__lowercase : Tuple = use_labels
__lowercase : List[Any] = is_training
__lowercase : Dict = num_labels
__lowercase : Optional[int] = initializer_range
__lowercase : int = scope
__lowercase : Optional[int] = width_multiplier
__lowercase : Any = ffn_dropout
__lowercase : Optional[int] = attn_dropout
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Any = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def lowerCAmelCase ( self : Optional[int] , __a : Tuple , __a : Tuple , __a : Any , __a : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = MobileViTVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Optional[Any] , __a : Optional[Any] , __a : str , __a : Optional[Any] , __a : int ) -> str:
"""simple docstring"""
__lowercase : Any = self.num_labels
__lowercase : Dict = MobileViTVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , __a : Dict , __a : Optional[Any] , __a : Optional[Any] , __a : Tuple ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.num_labels
__lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : Dict = config_and_inputs
__lowercase : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : str = False
_A : List[str] = False
_A : Any = False
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = MobileViTVaModelTester(self )
__lowercase : List[str] = MobileViTVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Optional[Any] = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
def check_hidden_states_output(__a : Tuple , __a : Any , __a : List[str] ):
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Optional[int] = outputs.hidden_states
__lowercase : Optional[int] = 5
self.assertEqual(len(__a ) , __a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase : List[str] = 2
for i in range(len(__a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Union[str, Any] = MobileViTVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
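# Loads the standard COCO "two cats" fixture image shared by the vision integration tests.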
__lowercase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__a )
__lowercase : Optional[int] = self.default_image_processor
__lowercase : List[Any] = prepare_img()
__lowercase : Dict = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : List[Any] = model(**__a )
# verify the logits
__lowercase : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : Union[str, Any] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Tuple = model.to(__a )
__lowercase : List[str] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Union[str, Any] = prepare_img()
__lowercase : Union[str, Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Union[str, Any] = outputs.logits
# verify the logits
__lowercase : str = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __a )
__lowercase : List[str] = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Dict = model.to(__a )
__lowercase : str = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Tuple = prepare_img()
__lowercase : List[str] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : List[str] = model(**__a )
__lowercase : Optional[Any] = outputs.logits.detach().cpu()
__lowercase : Any = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(50, 60)] )
__lowercase : Optional[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __a )
__lowercase : Dict = image_processor.post_process_semantic_segmentation(outputs=__a )
__lowercase : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __a )
| 704
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : int = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
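# key_length falls back to the encoder sequence length above whenever the model
# tester does not define a separate key dimension.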
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 649
| 0
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCamelCase : List[Any] = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
lowerCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( ):
__lowercase : List[Any] = """https://pypi.org/pypi/diffusers/json"""
__lowercase : Tuple = json.loads(request.urlopen(lowerCAmelCase_ ).read() )["""releases"""].keys()
return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : version.Version(lowerCAmelCase_ ) )
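# Releases are sorted with packaging.version.Version so that, e.g., "0.10.0"
# correctly orders after "0.9.0" (plain string sorting would get this wrong).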
def snake_case_ ( ):
# This function has already run if HF_MODULES_CACHE is already in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
__lowercase : Any = Path(lowerCAmelCase_ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def snake_case_ ( lowerCAmelCase_ : Union[str, os.PathLike] ):
init_hf_modules()
__lowercase : List[str] = Path(lowerCAmelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
__lowercase : Union[str, Any] = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as f:
__lowercase : List[Any] = f.read()
# Imports of the form `import .xxx`
__lowercase : Tuple = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , lowerCAmelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , lowerCAmelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCAmelCase_ ) )
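# Example (hypothetical file contents): a module containing the lines
#     import .utils
#     from .pipeline import MyPipeline
# (as matched by the regexes above) would yield ["utils", "pipeline"];
# the order is not guaranteed since a set is used to deduplicate.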
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
__lowercase : List[str] = False
__lowercase : Union[str, Any] = [module_file]
__lowercase : int = []
# Let's recurse through all relative imports
while not no_change:
__lowercase : Any = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCAmelCase_ ) )
__lowercase : List[Any] = Path(lowerCAmelCase_ ).parent
__lowercase : Tuple = [str(module_path / m ) for m in new_imports]
__lowercase : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
__lowercase : str = [F"{f}.py" for f in new_import_files]
__lowercase : Union[str, Any] = len(lowerCAmelCase_ ) == 0
all_relative_imports.extend(lowerCAmelCase_ )
return all_relative_imports
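# The loop above is a fixed-point iteration: it keeps resolving the relative
# imports of newly discovered files until a pass adds nothing new, i.e. it
# computes the transitive closure of the module's relative-import graph.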
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as f:
__lowercase : List[Any] = f.read()
# Imports of the form `import xxx`
__lowercase : Dict = re.findall(r"""^\s*import\s+(\S+)\s*$""" , lowerCAmelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , lowerCAmelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
__lowercase : Union[str, Any] = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
__lowercase : Optional[int] = list(set(lowerCAmelCase_ ) )
__lowercase : Tuple = []
for imp in imports:
try:
importlib.import_module(lowerCAmelCase_ )
except ImportError:
missing_packages.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F"{', '.join(lowerCAmelCase_ )}. Run `pip install {' '.join(lowerCAmelCase_ )}`" )
return get_relative_imports(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] ):
__lowercase : str = module_path.replace(os.path.sep , """.""" )
__lowercase : Optional[Any] = importlib.import_module(lowerCAmelCase_ )
if class_name is None:
return find_pipeline_class(lowerCAmelCase_ )
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Tuple ):
from ..pipelines import DiffusionPipeline
__lowercase : Optional[int] = dict(inspect.getmembers(lowerCAmelCase_ , inspect.isclass ) )
__lowercase : str = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCAmelCase_ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
F" {loaded_module}." )
__lowercase : Tuple = cls
return pipeline_class
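# Heuristic: the custom pipeline file must define exactly one DiffusionPipeline
# subclass that does not itself live in the `diffusers` package; that class is
# taken as the pipeline to load.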
def snake_case_ ( lowerCAmelCase_ : Union[str, os.PathLike] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[Dict[str, str]] = None , lowerCAmelCase_ : Optional[Union[bool, str]] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , ):
__lowercase : Dict = str(lowerCAmelCase_ )
__lowercase : Any = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ):
__lowercase : Dict = module_file_or_url
__lowercase : Union[str, Any] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
__lowercase : List[str] = get_diffusers_versions()
# cut ".dev0"
__lowercase : List[str] = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve the GitHub revision that matches this version
if revision is None:
__lowercase : Optional[int] = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
__lowercase : List[Any] = F"v{revision}"
elif revision == "main":
__lowercase : Tuple = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
__lowercase : Optional[Any] = COMMUNITY_PIPELINES_URL.format(revision=lowerCAmelCase_ , pipeline=lowerCAmelCase_ )
try:
__lowercase : Optional[int] = cached_download(
lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , )
__lowercase : Any = """git"""
__lowercase : Tuple = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
__lowercase : Optional[int] = hf_hub_download(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , )
__lowercase : List[str] = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
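# e.g. a repo id of "user/repo" is cached under the submodule folder "local/user--repo"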
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
__lowercase : str = check_imports(lowerCAmelCase_ )
# Now we move the module inside our cached dynamic modules.
__lowercase : Tuple = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCAmelCase_ )
__lowercase : List[Any] = Path(lowerCAmelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file and name the copy after the
# hash so that we only copy on modification, but that seems overkill for now).
# The only reason we copy at all is to avoid putting too many folders in sys.path.
shutil.copy(lowerCAmelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
__lowercase : Any = F"{module_needed}.py"
shutil.copy(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : int = use_auth_token
elif use_auth_token is True:
__lowercase : Optional[int] = HfFolder.get_token()
else:
__lowercase : Union[str, Any] = None
__lowercase : Optional[Any] = model_info(lowerCAmelCase_ , revision=lowerCAmelCase_ , token=lowerCAmelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__lowercase : Union[str, Any] = submodule_path / commit_hash
__lowercase : int = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCAmelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCAmelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCAmelCase_ , F"{module_needed}.py" , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
return os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Union[str, os.PathLike] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[Dict[str, str]] = None , lowerCAmelCase_ : Optional[Union[bool, str]] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : str , ):
__lowercase : Tuple = get_cached_module_file(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
return get_class_in_module(lowerCAmelCase_ , final_module.replace(""".py""" , """""" ) )
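# Hypothetical usage sketch (repo and file names are illustrative only): calling
# this loader as get_class_from_dynamic_module("my-org/my-pipeline", "pipeline.py")
# downloads pipeline.py, resolves its relative imports into the dynamic-modules
# cache, and returns the single DiffusionPipeline subclass it defines.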
| 705
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        """simple docstring"""
        np.random.seed(seed)
        return np.random.rand(*shape)
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
    @staticmethod
    def get_from_offsets(offsets, key):
        """simple docstring"""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
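        # for the wav2vec2 base architecture the convolutional feature encoder
        # downsamples the input waveform by a factor of 320, so at a 16 kHz sampling
        # rate each logit frame spans roughly 320 / 16000 = 0.02 s of audio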
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
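# Example invocations (a sketch; assumes this script is saved as `nlp_example.py`,
# the filename used in the accelerate examples repo - the `--mixed_precision` and
# `--cpu` flags are the ones this script defines below):
#
#   python nlp_example.py                          # single CPU or GPU, fp32
#   python nlp_example.py --mixed_precision fp16   # single device, mixed precision
#   accelerate launch nlp_example.py               # distributed, after `accelerate config`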
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
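# A note on the padding rule above (hypothetical numbers): with pad_to_multiple_of=8,
# a batch whose longest sequence is 37 tokens is padded up to 40, which keeps the
# tensor shapes friendly to fp16/bf16 tensor-core kernels.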
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
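    # e.g. with config batch_size=64 and MAX_GPU_BATCH_SIZE=16 this yields
    # gradient_accumulation_steps=4 and a per-step batch size of 16, so the
    # effective batch size stays at 64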
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
def and_gate(input_1: int, input_2: int) -> int:
    # AND outputs 1 only when neither input is 0
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
SPIECE_UNDERLINE = "▁"
class lowerCAmelCase ( PreTrainedTokenizer ):
'''simple docstring'''
_A : List[Any] = VOCAB_FILES_NAMES
_A : int = PRETRAINED_VOCAB_FILES_MAP
_A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        """simple docstring"""
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.__dict__.copy()
__lowercase : Dict = None
return state
def __setstate__( self : Dict , __a : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase : List[str] = {}
__lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : int , __a : Optional[int] ) -> int:
"""simple docstring"""
if self.remove_space:
__lowercase : Union[str, Any] = """ """.join(inputs.strip().split() )
else:
__lowercase : Any = inputs
__lowercase : Optional[int] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowercase : Optional[int] = unicodedata.normalize("""NFKD""" , __a )
__lowercase : Optional[Any] = """""".join([c for c in outputs if not unicodedata.combining(__a )] )
if self.do_lower_case:
__lowercase : str = outputs.lower()
return outputs
def lowerCAmelCase ( self : Tuple , __a : str ) -> List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.preprocess_text(__a )
__lowercase : Optional[Any] = self.sp_model.encode(__a , out_type=__a )
__lowercase : Optional[Any] = []
for piece in pieces:
if len(__a ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowercase : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(__a , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase : Optional[int] = cur_pieces[1:]
else:
__lowercase : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__a )
else:
new_pieces.append(__a )
return new_pieces
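    # (The loop above targets SentencePiece pieces that end in a comma preceded by a
    # digit - e.g. a hypothetical piece "▁2000," - re-encoding the digits on their own
    # and appending "," as a separate piece so punctuation is not glued to numbers.)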
def lowerCAmelCase ( self : Tuple , __a : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.PieceToId(__a )
def lowerCAmelCase ( self : Union[str, Any] , __a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self.sp_model.IdToPiece(__a )
def lowerCAmelCase ( self : Union[str, Any] , __a : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Any = []
__lowercase : Dict = """"""
__lowercase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
__lowercase : Optional[Any] = True
__lowercase : List[str] = []
else:
current_sub_tokens.append(__a )
__lowercase : Optional[Any] = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # add the word to every combination the current position holds
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # now push those combinations to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
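# Worked example: all_construct("ab", ["a", "b"])
#   table starts as [[[]], [], []]
#   i=0: "a" matches target[0:1] -> table[1] = [["a"]]
#   i=1: "b" matches target[1:2] -> table[2] = [["b", "a"]]
#   each combination is then reversed, giving [["a", "b"]]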
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCAmelCase ( self : int , __a : List[Any]=0 ) -> str:
"""simple docstring"""
__lowercase : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(__a ) )
__lowercase : int = np.random.RandomState(__a )
__lowercase : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__a )
__lowercase : List[str] = self.get_dummy_inputs()
__lowercase : Optional[Any] = pipe(**__a ).images
__lowercase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowercase : Optional[Any] = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Optional[int] = self.get_dummy_inputs()
__lowercase : Union[str, Any] = pipe(**__a ).images
__lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : str = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
# warmup pass to apply optimizations
__lowercase : Optional[int] = pipe(**self.get_dummy_inputs() )
__lowercase : Dict = self.get_dummy_inputs()
__lowercase : Any = pipe(**__a ).images
__lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Union[str, Any] = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Dict = self.get_dummy_inputs()
__lowercase : str = pipe(**__a ).images
__lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : List[Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Optional[int] = self.get_dummy_inputs()
__lowercase : Optional[int] = pipe(**__a ).images
__lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Optional[Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
__lowercase : str = self.get_dummy_inputs()
__lowercase : Optional[Any] = pipe(**__a ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : List[Any] = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : List[Any] = ort.SessionOptions()
__lowercase : Tuple = False
return options
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowercase : Tuple = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowercase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a )
__lowercase : str = """A fantasy landscape, trending on artstation"""
__lowercase : Tuple = np.random.RandomState(0 )
__lowercase : Any = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type="""np""" , )
__lowercase : int = output.images
__lowercase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowercase : int = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowercase : Optional[Any] = init_image.resize((768, 512) )
__lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Any = """A fantasy landscape, trending on artstation"""
__lowercase : Dict = np.random.RandomState(0 )
__lowercase : Optional[int] = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__a , output_type="""np""" , )
__lowercase : str = output.images
__lowercase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowercase : str = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def or_gate(input_1: int, input_2: int) -> int:
    # OR outputs 1 when at least one input is 1
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
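# The entries above mirror the compound-scaling idea from the EfficientNet paper:
# each variant grows channel width, layer depth and input resolution together
# (e.g. "b7" pairs width_coef 2.0 and depth_coef 3.1 with 600x600 inputs).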
def get_efficientnet_config(model_name):
__lowercase : Union[str, Any] = EfficientNetConfig()
__lowercase : int = CONFIG_MAP[model_name]["""hidden_dim"""]
__lowercase : List[Any] = CONFIG_MAP[model_name]["""width_coef"""]
__lowercase : Union[str, Any] = CONFIG_MAP[model_name]["""depth_coef"""]
__lowercase : List[Any] = CONFIG_MAP[model_name]["""image_size"""]
__lowercase : List[Any] = CONFIG_MAP[model_name]["""dropout_rate"""]
__lowercase : Dict = CONFIG_MAP[model_name]["""dw_padding"""]
__lowercase : int = """huggingface/label-files"""
__lowercase : int = """imagenet-1k-id2label.json"""
__lowercase : Dict = 1000
__lowercase : str = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Tuple = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Optional[Any] = idalabel
__lowercase : str = {v: k for k, v in idalabel.items()}
return config
def prepare_img():
__lowercase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : List[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
def convert_image_processor(model_name):
__lowercase : Tuple = CONFIG_MAP[model_name]["""image_size"""]
__lowercase : Any = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=lowerCAmelCase_ , )
return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
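    # e.g. a TF variable name like "block1a_expand_conv/kernel:0" yields the block
    # name "1a"; the sorted unique names ("1a", "2a", "2b", ...) are then mapped to
    # the consecutive HF block indices "0", "1", "2", ...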
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__lowercase : List[Any] = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
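    # TF and PyTorch store weights in different layouts: TF conv kernels are
    # (H, W, in_ch, out_ch) while PyTorch expects (out_ch, in_ch, H, W), hence
    # permute(3, 2, 0, 1) below; TF depthwise kernels are (H, W, ch, mult) vs.
    # PyTorch's (ch, mult, H, W), hence permute(2, 3, 0, 1); dense kernels only
    # need a plain transpose.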
for key, value in tf_params.items():
if "normalization" in key:
continue
__lowercase : str = key_mapping[key]
if "_conv" in key and "kernel" in key:
__lowercase : List[str] = torch.from_numpy(lowerCAmelCase_ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__lowercase : int = torch.from_numpy(lowerCAmelCase_ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__lowercase : Tuple = torch.from_numpy(np.transpose(lowerCAmelCase_ ) )
else:
__lowercase : List[Any] = torch.from_numpy(lowerCAmelCase_ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowerCAmelCase_ )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
__lowercase : int = model_classes[model_name](
include_top=lowerCAmelCase_ , weights="""imagenet""" , input_tensor=lowerCAmelCase_ , input_shape=lowerCAmelCase_ , pooling=lowerCAmelCase_ , classes=1000 , classifier_activation="""softmax""" , )
__lowercase : Tuple = original_model.trainable_variables
__lowercase : str = original_model.non_trainable_variables
__lowercase : Optional[int] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__lowercase : Optional[Any] = param.numpy()
__lowercase : Optional[Any] = list(tf_params.keys() )
# Load HuggingFace model
__lowercase : str = get_efficientnet_config(lowerCAmelCase_ )
__lowercase : List[Any] = EfficientNetForImageClassification(lowerCAmelCase_ ).eval()
__lowercase : Union[str, Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
__lowercase : List[str] = rename_keys(lowerCAmelCase_ )
replace_params(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Initialize preprocessor and preprocess input image
__lowercase : Optional[Any] = convert_image_processor(lowerCAmelCase_ )
__lowercase : Any = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
__lowercase : Tuple = hf_model(**lowerCAmelCase_ )
__lowercase : List[Any] = outputs.logits.detach().numpy()
# Original model inference
__lowercase : Any = False
__lowercase : Optional[Any] = CONFIG_MAP[model_name]["""image_size"""]
__lowercase : int = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__lowercase : Tuple = image.img_to_array(lowerCAmelCase_ )
__lowercase : str = np.expand_dims(lowerCAmelCase_ , axis=0 )
__lowercase : Optional[Any] = original_model.predict(lowerCAmelCase_ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowerCAmelCase_ ):
os.mkdir(lowerCAmelCase_ )
# Save converted model and image processor
hf_model.save_pretrained(lowerCAmelCase_ )
preprocessor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
# Push model and image processor to hub
print(F"Pushing converted {model_name} to the hub..." )
__lowercase : List[str] = F"efficientnet-{model_name}"
preprocessor.push_to_hub(lowerCAmelCase_ )
hf_model.push_to_hub(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCamelCase : Any = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
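# Map each submodule to the public names it exports; `_LazyModule` below defers
# the heavy imports until one of these attributes is first accessed.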
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def knapsack(weights, values, number_of_items, max_weight, index):
    """
    Naive recursive 0/1 knapsack: at each index either skip the item or, if it
    fits, take it and recurse on the remaining capacity. Exponential time.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    if index == number_of_items:
        return 0
    ans_without_current = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans_with_current = 0
    if weights[index] <= max_weight:
        ans_with_current = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans_without_current, ans_with_current)
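# Sketch (not part of the original module): a memoised variant that caches on
# (index, remaining capacity), turning the exponential recursion above into
# O(number_of_items * max_weight) work for integer weights.
def knapsack_memoised(weights, values, number_of_items, max_weight):
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def best_from(index, remaining):
        if index == number_of_items:
            return 0
        best = best_from(index + 1, remaining)
        if weights[index] <= remaining:
            best = max(best, values[index] + best_from(index + 1, remaining - weights[index]))
        return best

    return best_from(0, max_weight)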
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
lowerCamelCase : Any = None
def snake_case_ ( ):
global _logger
__lowercase : List[str] = _logger or logging.getLogger(__name__ )
return _logger
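# Raised when the lock could not be acquired within *timeout* seconds; carries
# the path of the lock file for the error message.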
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
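# The proxy above lets `acquire()` be used both directly and as a context manager
# (`with lock.acquire(timeout=10): ...`) while the lock stays reference counted.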
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
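# Windows implementation: locks the first byte of the lock file via
# msvcrt.locking(LK_NBLCK), which fails immediately instead of blocking.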
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
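# POSIX implementation: takes an exclusive, non-blocking flock
# (fcntl.LOCK_EX | fcntl.LOCK_NB) on the lock file's descriptor.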
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
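# Portable fallback: the mere existence of the lock file is the lock, enforced by
# creating it with O_EXCL; stale lock files can linger after a crash.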
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
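# Usage sketch (assuming this module is importable, e.g. as ``filelock``);
# ``FileLock`` resolves to whichever platform-appropriate class was selected above:
#
#   lock = FileLock("shared_resource.txt.lock", timeout=10)
#   with lock:
#       with open("shared_resource.txt", "a") as f:
#           f.write("exclusive write\n")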
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''dpt'''
def __init__( self : List[Any] , __a : Any=768 , __a : List[Any]=12 , __a : Union[str, Any]=12 , __a : Tuple=3072 , __a : Dict="gelu" , __a : Tuple=0.0 , __a : Dict=0.0 , __a : str=0.02 , __a : Dict=1E-12 , __a : str=384 , __a : Union[str, Any]=16 , __a : int=3 , __a : str=False , __a : List[Any]=True , __a : str=[2, 5, 8, 11] , __a : Union[str, Any]="project" , __a : Dict=[4, 2, 1, 0.5] , __a : Optional[Any]=[96, 192, 384, 768] , __a : Union[str, Any]=256 , __a : Optional[Any]=-1 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[Any]=0.4 , __a : List[Any]=255 , __a : Dict=0.1 , __a : Dict=[1, 1024, 24, 24] , __a : str=[0, 1] , __a : str=None , **__a : Dict , ) -> str:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Tuple = hidden_size
__lowercase : Any = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
__lowercase : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
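# These defaults correspond to the first three stages of a BiT (ResNet-50-style)
# backbone, as used by the DPT-hybrid checkpoints.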
__lowercase : Dict = BitConfig(**__a )
elif isinstance(__a , __a ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
__lowercase : str = BitConfig(**__a )
elif isinstance(__a , __a ):
__lowercase : Optional[int] = backbone_config
else:
raise ValueError(
F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
__lowercase : Optional[int] = backbone_featmap_shape
__lowercase : Optional[int] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
__lowercase : Optional[Any] = None
__lowercase : List[Any] = None
__lowercase : Dict = []
__lowercase : Dict = num_hidden_layers
__lowercase : Dict = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : Tuple = hidden_act
__lowercase : Optional[Any] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : Optional[Any] = initializer_range
__lowercase : int = layer_norm_eps
__lowercase : Tuple = image_size
__lowercase : Dict = patch_size
__lowercase : Tuple = num_channels
__lowercase : Dict = qkv_bias
__lowercase : Any = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
__lowercase : Dict = readout_type
__lowercase : str = reassemble_factors
__lowercase : str = neck_hidden_sizes
__lowercase : Union[str, Any] = fusion_hidden_size
__lowercase : Tuple = head_in_index
__lowercase : Dict = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
__lowercase : Union[str, Any] = use_auxiliary_head
__lowercase : Union[str, Any] = auxiliary_loss_weight
__lowercase : Dict = semantic_loss_ignore_index
__lowercase : List[Any] = semantic_classifier_dropout
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__lowercase : List[str] = self.backbone_config.to_dict()
__lowercase : str = self.__class__.model_type
return output
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
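# The export config above targets ONNX opset 12 and requires torch >= 1.12; the
# dummy inputs generated below pin the batch and sequence dimensions to fixed
# sizes so the exporter does not specialise the dynamic axes away.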
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Tuple , __a : str , __a : str=13 , __a : Optional[int]=7 , __a : Optional[int]=True , __a : int=True , __a : Any=True , __a : List[Any]=True , __a : Any=True , __a : Union[str, Any]=False , __a : Dict=False , __a : List[Any]=False , __a : List[str]=2 , __a : Union[str, Any]=99 , __a : int=0 , __a : List[Any]=32 , __a : Dict=5 , __a : Optional[int]=4 , __a : List[Any]=0.1 , __a : Optional[Any]=0.1 , __a : List[Any]=512 , __a : Tuple=12 , __a : str=2 , __a : List[Any]=0.02 , __a : int=3 , __a : int=4 , __a : Optional[int]="last" , __a : str=None , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : List[str] = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : Any = is_training
__lowercase : Any = use_input_lengths
__lowercase : str = use_token_type_ids
__lowercase : Union[str, Any] = use_labels
__lowercase : Optional[int] = gelu_activation
__lowercase : Optional[Any] = sinusoidal_embeddings
__lowercase : List[str] = causal
__lowercase : List[Any] = asm
__lowercase : List[str] = n_langs
__lowercase : Dict = vocab_size
__lowercase : Optional[int] = n_special
__lowercase : List[Any] = hidden_size
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : Any = hidden_dropout_prob
__lowercase : List[str] = attention_probs_dropout_prob
__lowercase : Any = max_position_embeddings
__lowercase : Dict = type_vocab_size
__lowercase : Tuple = type_sequence_label_size
__lowercase : int = initializer_range
__lowercase : Union[str, Any] = num_labels
__lowercase : List[str] = num_choices
__lowercase : int = summary_type
__lowercase : Any = use_proj
__lowercase : int = scope
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Optional[int] = None
if self.use_input_lengths:
__lowercase : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase : Tuple = None
if self.use_token_type_ids:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase : int = None
__lowercase : List[str] = None
__lowercase : str = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
__lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : List[str] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCAmelCase ( self : Optional[Any] , __a : Any , __a : Optional[int] , __a : List[Any] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Optional[int] , __a : Optional[Any] , ) -> Tuple:
"""simple docstring"""
__lowercase : Any = FlaubertModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Any = model(__a , lengths=__a , langs=__a )
__lowercase : Dict = model(__a , langs=__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : str , __a : List[Any] , __a : Tuple , __a : Any , __a : Tuple , __a : str , __a : Tuple , __a : int , __a : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = FlaubertWithLMHeadModel(__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] , __a : int , __a : Optional[int] , __a : Optional[int] , __a : List[Any] , __a : int , __a : int , __a : Optional[Any] , __a : Optional[int] , __a : Dict , ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = FlaubertForQuestionAnsweringSimple(__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
__lowercase : int = model(__a , start_positions=__a , end_positions=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Any , __a : Dict , __a : Dict , __a : Any , __a : Tuple , __a : List[str] , __a : Any , __a : List[Any] , __a : Optional[Any] , __a : Dict , ) -> str:
"""simple docstring"""
__lowercase : int = FlaubertForQuestionAnswering(__a )
model.to(__a )
model.eval()
__lowercase : List[str] = model(__a )
__lowercase : str = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , p_mask=__a , )
__lowercase : List[str] = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , )
(__lowercase ) : Optional[int] = result_with_labels.to_tuple()
__lowercase : List[Any] = model(__a , start_positions=__a , end_positions=__a )
(__lowercase ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCAmelCase ( self : int , __a : Dict , __a : int , __a : List[str] , __a : str , __a : List[Any] , __a : Union[str, Any] , __a : Any , __a : Optional[int] , __a : Dict , ) -> List[str]:
"""simple docstring"""
__lowercase : Any = FlaubertForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(__a )
__lowercase : int = model(__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self : Dict , __a : Optional[Any] , __a : Optional[Any] , __a : Dict , __a : Tuple , __a : List[Any] , __a : Any , __a : str , __a : int , __a : Any , ) -> str:
"""simple docstring"""
__lowercase : List[str] = self.num_labels
__lowercase : str = FlaubertForTokenClassification(__a )
model.to(__a )
model.eval()
__lowercase : Any = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __a : List[str] , __a : Dict , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : Any , __a : List[str] , __a : Optional[Any] , __a : int , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.num_choices
__lowercase : Optional[int] = FlaubertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__lowercase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : List[str] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[Any] = self.prepare_config_and_inputs()
(
__lowercase
) : Optional[int] = config_and_inputs
__lowercase : Dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
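# The tester above builds tiny random configs and inputs; the suite below runs
# the shared ModelTesterMixin / PipelineTesterMixin checks against every
# Flaubert head, plus a TorchScript tracing round-trip and a slow integration test.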
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_A : List[str] = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : str , __a : Optional[int] , __a : Optional[int] , __a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCAmelCase ( self : Optional[Any] , __a : str , __a : List[Any] , __a : List[str]=False ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__lowercase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
__lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
__lowercase : Tuple = FlaubertModelTester(self )
__lowercase : str = ConfigTester(self , config_class=__a , emb_dim=37 )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__a )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__a )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__a )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__a )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__a )
@slow
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Any = FlaubertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@slow
@require_torch_gpu
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__lowercase : Tuple = True
__lowercase : List[Any] = model_class(config=__a )
__lowercase : List[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = torch.jit.trace(
__a , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__a , os.path.join(__a , """traced_model.pt""" ) )
__lowercase : Dict = torch.jit.load(os.path.join(__a , """traced_model.pt""" ) , map_location=__a )
loaded(inputs_dict["""input_ids"""].to(__a ) , inputs_dict["""attention_mask"""].to(__a ) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
__lowercase : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
__lowercase : Dict = model(__a )[0]
__lowercase : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
__lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
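# A Conversation is the pipeline's unit of state: `add_user_input` stages a new
# message, `mark_processed` moves it into history once answered, and
# `append_response` records the model's reply.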
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
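# Usage sketch (assuming a checkpoint registered for the ``conversational`` task,
# e.g. a DialoGPT-style model):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational")
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])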
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , __a : List[Any] , __a : Optional[int]=13 , __a : Tuple=7 , __a : Any=True , __a : Dict=True , __a : str=True , __a : Union[str, Any]=True , __a : Dict=99 , __a : List[str]=32 , __a : int=5 , __a : Union[str, Any]=4 , __a : Tuple=37 , __a : List[str]="gelu" , __a : List[Any]=0.1 , __a : Optional[int]=0.1 , __a : Optional[Any]=512 , __a : Tuple=16 , __a : List[Any]=2 , __a : Optional[int]=0.02 , __a : Dict=4 , ) -> int:
"""simple docstring"""
__lowercase : List[Any] = parent
__lowercase : Optional[int] = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : str = is_training
__lowercase : List[str] = use_attention_mask
__lowercase : Any = use_token_type_ids
__lowercase : Optional[int] = use_labels
__lowercase : Union[str, Any] = vocab_size
__lowercase : str = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : List[str] = intermediate_size
__lowercase : Any = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Union[str, Any] = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : List[str] = type_sequence_label_size
__lowercase : Optional[int] = initializer_range
__lowercase : Optional[int] = num_choices
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : List[str] = None
if self.use_attention_mask:
__lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
__lowercase : Tuple = config_and_inputs
__lowercase : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
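# The suite below reuses the shared FlaxModelTesterMixin checks for every Albert
# head and adds a slow integration test against the `albert-base-v2` checkpoint.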
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Tuple = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase : str = model_class_name.from_pretrained("""albert-base-v2""" )
__lowercase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Dict = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
__lowercase : Any = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__lowercase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowercase : int = model(__a , attention_mask=__a )[0]
__lowercase : Dict = (1, 11, 768)
self.assertEqual(output.shape , __a )
__lowercase : List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
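# The suite below exercises the backbone, image-classification and semantic-
# segmentation heads with the tiny config above, plus hidden-state shape checks.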
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
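# Hedged usage sketch (not part of the test suite): classifying a local image with
# the public checkpoint exercised above. `image_path` is a placeholder assumption;
# the class names below are the real transformers exports behind this file's aliases.
def _example_classify(image_path="""cat.png""" ):
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

    processor = MobileNetV2ImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" )
    model = MobileNetV2ForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" )
    inputs = processor(images=Image.open(image_path ) , return_tensors="""pt""" )
    with torch.no_grad():
        logits = model(**inputs ).logits
    # id2label covers the 1001 classes (ImageNet + background) matching the logits
    return model.config.id2label[int(logits.argmax(-1 ).item() )]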
| 649
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : str = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
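# Hedged sketch of the pattern `_LazyModule` implements (PEP 562 module-level
# __getattr__): heavy submodules are imported only when one of their exported
# names is first accessed. The helper below is illustrative, not the real class.
#
# import importlib
#
# def __getattr__(name):
#     for submodule, exported in _import_structure.items():
#         if name in exported:
#             return getattr(importlib.import_module("." + submodule, __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")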
| 716
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # render the bar only on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
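if __name__ == "__main__":
    # Hedged demo: a single process has local_process_index == 0, so the bar renders.
    for _ in snake_case_(True , range(3 ) , desc="""demo""" ):
        pass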
| 649
| 0
|
import numpy as np
def sigmoid ( lowerCAmelCase_ : np.ndarray ):
    return 1 / (1 + np.exp(-lowerCAmelCase_ ))
def snake_case_ ( lowerCAmelCase_ : np.ndarray ):
    # sigmoid linear unit (SiLU / swish): x * sigmoid(x)
    return lowerCAmelCase_ * sigmoid(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
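    # Hedged worked example: sigmoid(0) = 0.5 and sigmoid(2) ~ 0.8808, so the
    # x * sigmoid(x) values are 0.0 and ~1.7616.
    print(sigmoid(np.array([0.0, 2.0] ) ))      # [0.5        0.88079708]
    print(snake_case_(np.array([0.0, 2.0] ) ))  # [0.         1.76159416]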
| 717
|
from __future__ import annotations
def maximum_non_adjacent_sum ( nums : list[int] ) -> int:
    # dynamic programming with two running totals: the best sum that includes
    # the current element and the best sum that excludes it
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
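    # Hedged worked examples for the DP above: [1, 2, 3] -> 1 + 3 = 4 and
    # [2, 7, 9, 3, 1] -> 2 + 9 + 1 = 12.
    print(maximum_non_adjacent_sum([1, 2, 3] ))        # 4
    print(maximum_non_adjacent_sum([2, 7, 9, 3, 1] ))  # 12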
| 649
| 0
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def snake_case_ ( method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
            # let accelerate's offload hook move weights into place before the call
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
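# Hedged usage sketch: the decorator wraps instance methods; when accelerate has
# attached an `_hf_hook` (cpu/disk offload), its `pre_forward` runs first so weights
# land on the right device. `_Demo` is illustrative only.
class _Demo:
    @snake_case_
    def forward(self , x ):
        return x * 2
# With no hook attached the call passes straight through: _Demo().forward(3) == 6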
| 718
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
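# Hedged usage sketch (not part of this __init__): minimal unconditional sampling
# with one of the pipelines exported above. The checkpoint id is a public example
# and an assumption here.
#
# from diffusers import DDPMPipeline
#
# pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
# image = pipe(num_inference_steps=50).images[0]
# image.save("sample.png")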
| 649
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : Optional[Any] , __a : List[str]=13 , __a : Dict=7 , __a : Optional[Any]=True , __a : Tuple=True , __a : Optional[Any]=True , __a : Dict=True , __a : Optional[int]=99 , __a : Tuple=64 , __a : Any=5 , __a : Dict=4 , __a : Union[str, Any]=37 , __a : List[str]="gelu" , __a : int=0.1 , __a : Any=0.1 , __a : int=512 , __a : Union[str, Any]=16 , __a : Optional[Any]=2 , __a : Optional[int]=0.02 , __a : Dict=3 , __a : List[str]=4 , __a : Any=None , ) -> int:
"""simple docstring"""
__lowercase : str = parent
__lowercase : Tuple = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : Optional[Any] = is_training
__lowercase : List[Any] = use_input_mask
__lowercase : Union[str, Any] = use_token_type_ids
__lowercase : List[str] = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : Optional[Any] = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : List[Any] = hidden_act
__lowercase : Dict = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : int = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : int = initializer_range
__lowercase : Dict = num_labels
__lowercase : Dict = num_choices
__lowercase : Optional[int] = scope
__lowercase : Optional[Any] = vocab_size - 1
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Union[str, Any] = None
if self.use_input_mask:
__lowercase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : List[Any] = None
if self.use_labels:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase : Tuple = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : List[Any] , __a : Dict ) -> int:
"""simple docstring"""
__lowercase : Tuple = GPTNeoXModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a , attention_mask=__a )
__lowercase : Any = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : int , __a : Any , __a : Optional[int] , __a : str ) -> int:
"""simple docstring"""
__lowercase : List[str] = True
__lowercase : List[str] = GPTNeoXModel(__a )
model.to(__a )
model.eval()
__lowercase : Union[str, Any] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[int] , __a : str , __a : Tuple , __a : Any , __a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Any , __a : Optional[Any] , __a : List[str] , __a : str , __a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = self.num_labels
__lowercase : List[str] = GPTNeoXForQuestionAnswering(__a )
model.to(__a )
model.eval()
__lowercase : List[str] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : List[str] , __a : Any , __a : List[Any] , __a : Union[str, Any] , __a : Dict ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = self.num_labels
__lowercase : Optional[Any] = GPTNeoXForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Dict = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] , __a : Union[str, Any] , __a : Any , __a : List[str] , __a : Any ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.num_labels
__lowercase : Optional[int] = GPTNeoXForTokenClassification(__a )
model.to(__a )
model.eval()
__lowercase : Union[str, Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __a : str , __a : Optional[int] , __a : Any ) -> Any:
"""simple docstring"""
__lowercase : Any = True
__lowercase : Tuple = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
__lowercase : int = model(__a , attention_mask=__a , use_cache=__a )
__lowercase : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase : Union[str, Any] = model(__a , attention_mask=__a , output_hidden_states=__a )
__lowercase : Optional[int] = output_from_no_past["""hidden_states"""][0]
__lowercase : List[str] = model(
__a , attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["""hidden_states"""][0]
# select random slice
__lowercase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Dict = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
__lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_A : List[Any] = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : Any = False
_A : Optional[Any] = False
_A : List[str] = False
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[Any] = GPTNeoXModelTester(self )
__lowercase : int = ConfigTester(self , config_class=__a , hidden_size=64 , num_attention_heads=8 )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase : Dict = None
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__a , __a , __a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCAmelCase ( self : int , __a : int ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Tuple = ids_tensor([1, 10] , config.vocab_size )
__lowercase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase : List[Any] = GPTNeoXModel(__a )
original_model.to(__a )
original_model.eval()
__lowercase : Union[str, Any] = original_model(__a ).last_hidden_state
__lowercase : Tuple = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
__lowercase : str = GPTNeoXModel(__a )
scaled_model.to(__a )
scaled_model.eval()
__lowercase : List[Any] = scaled_model(__a ).last_hidden_state
__lowercase : List[str] = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a , __a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__a , __a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a , __a , atol=1E-5 ) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : int = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
__lowercase : List[Any] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__a )
__lowercase : Union[str, Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__a )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__lowercase : Any = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
__lowercase : Tuple = model.generate(**__a , do_sample=__a , max_new_tokens=20 )
__lowercase : List[Any] = tokenizer.batch_decode(__a )[0]
self.assertEqual(__a , __a )
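# Hedged sketch of the RoPE-scaling knob exercised in the parameterized test above:
# `rope_scaling` takes a strategy ("linear" or "dynamic") and a factor > 1, letting
# a checkpoint attend beyond its trained context length. Sizes below are tiny and
# purely illustrative.
def _example_rope_scaling():
    config = GPTNeoXConfig(
        vocab_size=64 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 ,
        intermediate_size=37 , max_position_embeddings=128 ,
        rope_scaling={"""type""": """linear""", """factor""": 2.0} ,
    )
    return GPTNeoXModel(config )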
| 719
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
__lowercase : Any = """backbone.""" if is_semantic else """"""
__lowercase : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
for i in range(config.num_hidden_layers ):
__lowercase : Tuple = """backbone.""" if is_semantic else """"""
# queries, keys and values
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
__lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
__lowercase : List[str] = in_proj_weight[
: config.hidden_size, :
]
__lowercase : Union[str, Any] = q_bias
__lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__lowercase : str = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
__lowercase : str = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
__lowercase : List[str] = gamma_a
__lowercase : Optional[int] = gamma_a
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
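# Hedged invocation example (the script filename and output path are placeholder
# assumptions):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base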
| 649
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
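# Hedged usage sketch: building dummy export inputs from the OnnxConfig above.
# `LayoutLMv3OnnxConfig` is the public name of this class; the processor id is the
# public base checkpoint, and the method shown is the one defined last above.
#
# from transformers import AutoProcessor, LayoutLMv3Config
# from transformers.utils import TensorType
#
# config = LayoutLMv3Config.from_pretrained("microsoft/layoutlmv3-base")
# onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
# processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
# dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)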
| 720
|
from torch import nn
class lowerCAmelCase ( nn.Module ):
    '''simple docstring'''
    def __init__( self , class_size : int , embed_size : int ) -> None:
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def forward( self , hidden_state ):
        """simple docstring"""
        logits = self.mlp(hidden_state )
        return logits
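if __name__ == "__main__":
    import torch
    # Hedged usage sketch: scoring a batch of fixed-size hidden representations
    # with the head above; the sizes here are illustrative.
    head = lowerCAmelCase(class_size=5 , embed_size=768 )
    print(head(torch.randn(4 , 768 ) ).shape )  # torch.Size([4, 5])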
| 649
| 0
|
def snake_case_ ( sentence : str , ngram_size : int ):
    # all character-level n-grams of length `ngram_size`
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
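    # Hedged worked example: the "grams" are character substrings, not word tuples.
    print(snake_case_("""hello""" , 3 ))  # ['hel', 'ell', 'llo']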
| 721
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path ( pred_path , tgt_path , save_path=None , **rouge_kwargs ):
    # extra kwargs are forwarded to calculate_rouge
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **rouge_kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
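# Hedged invocation example (the script and file names are placeholder assumptions):
#
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge_scores.json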
| 649
| 0
|