code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE(input_a: int, input_b: int) -> int:
    """OR gate: return 1 if at least one input is 1, otherwise 0.

    Fix: the original declared both parameters with the same name
    (`_UpperCAmelCase` twice), which is a SyntaxError in Python; the
    parameters are renamed so both inputs actually participate.
    """
    # A tuple containing at least one 1 means the OR of the two inputs is 1.
    return int((input_a, input_b).count(1) != 0)
def _SCREAMING_SNAKE_CASE():
    # Self-test for the OR-gate truth table.
    # NOTE(review): `or_gate` is not defined anywhere in this file (the gate
    # function was renamed by obfuscation, and this test even reuses its
    # name); calling this raises NameError — confirm the intended function
    # name before relying on these asserts.
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Demo: print the OR-gate truth table.
    # NOTE(review): `or_gate` is undefined in this file under these
    # obfuscated names — running this block raises NameError; confirm the
    # intended gate-function name.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 4 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
    """Fixture that builds ChineseCLIP image-processor kwargs and random test images.

    NOTE(review): this chunk is machine-obfuscated — `__init__` declares every
    parameter as `_UpperCamelCase` (duplicate parameter names are a
    SyntaxError) and every assignment target is the throwaway name
    `lowerCAmelCase_`, while the bodies read the intended names
    (`size`, `parent`, `self.batch_size`, `image_inputs`, ...). Restore from
    the un-obfuscated original before executing.
    """

    def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=18 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.48145466, 0.4578275, 0.40821073] , _UpperCamelCase=[0.26862954, 0.26130258, 0.27577711] , _UpperCamelCase=True , ) -> Dict:
        # Default resize/crop geometry when the caller passes None.
        lowerCAmelCase_ = size if size is not None else {"height": 224, "width": 224}
        lowerCAmelCase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = image_size
        lowerCAmelCase_ = min_resolution
        lowerCAmelCase_ = max_resolution
        lowerCAmelCase_ = do_resize
        lowerCAmelCase_ = size
        lowerCAmelCase_ = do_center_crop
        lowerCAmelCase_ = crop_size
        lowerCAmelCase_ = do_normalize
        lowerCAmelCase_ = image_mean
        lowerCAmelCase_ = image_std
        lowerCAmelCase_ = do_convert_rgb

    def __a ( self ) -> List[Any]:
        # Kwargs dict consumed as ImageProcessor(**kwargs) by the test classes.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def __a ( self , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=False ) -> Dict:
        # Build a batch of random images as PIL, numpy, or torch inputs.
        # NOTE(review): `np.uinta` is presumably `np.uint8`, and `image_inputs`
        # is appended to before being bound (its initializer was renamed to
        # `lowerCAmelCase_`) — confirm against the original.
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            lowerCAmelCase_ = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
        else:
            lowerCAmelCase_ = []
            for i in range(self.batch_size ):
                lowerCAmelCase_ , lowerCAmelCase_ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            lowerCAmelCase_ = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            lowerCAmelCase_ = [torch.from_numpy(_UpperCamelCase ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class _lowerCAmelCase ( __a , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor over PIL, numpy, and torch inputs.

    NOTE(review): the mixin base `__a` is undefined at module level (upstream
    this is an image-processing test mixin), every test method was collapsed
    to the name `__a` (later defs silently overwrite earlier ones), and
    assignment targets were renamed to `lowerCAmelCase_` while bodies read the
    intended names (`image_processor`, `image_inputs`, `encoded_images`) —
    restore from the un-obfuscated original before executing.
    """

    # Processor class under test; None when vision deps are missing.
    _lowercase =ChineseCLIPImageProcessor if is_vision_available() else None

    def __a ( self ) -> Union[str, Any]:
        # setUp: shared fixture with center-cropping enabled.
        lowerCAmelCase_ = ChineseCLIPImageProcessingTester(self , do_center_crop=_UpperCamelCase )

    @property
    def __a ( self ) -> Tuple:
        return self.image_processor_tester.prepare_image_processor_dict()

    def __a ( self ) -> Any:
        # The processor must expose every configurable attribute.
        lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "do_center_crop" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "center_crop" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "do_convert_rgb" ) )

    def __a ( self ) -> List[str]:
        # from_dict must honor defaults and explicit size/crop overrides.
        lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def __a ( self ) -> str:
        pass

    def __a ( self ) -> Any:
        # Initialize image_processing
        lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )
        # Test not batched input
        lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __a ( self ) -> str:
        # Initialize image_processing
        lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , np.ndarray )
        # Test not batched input
        lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __a ( self ) -> Union[str, Any]:
        # Initialize image_processing
        lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , torch.Tensor )
        # Test not batched input
        lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( __a , unittest.TestCase ):
    """Four-channel (e.g. RGBA) variant of the image-processor tests: inputs
    have 4 channels but encoded outputs are expected to have 3 after RGB
    conversion.

    NOTE(review): same obfuscation damage as the sibling test class — mixin
    base `__a` undefined, all methods named `__a`, assignment targets renamed
    to `lowerCAmelCase_`; restore from the un-obfuscated original before use.
    """

    # Processor class under test; None when vision deps are missing.
    _lowercase =ChineseCLIPImageProcessor if is_vision_available() else None

    def __a ( self ) -> List[Any]:
        # setUp: 4-channel fixture.
        lowerCAmelCase_ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_UpperCamelCase )
        # Channels expected after do_convert_rgb collapses the alpha channel.
        lowerCAmelCase_ = 3

    @property
    def __a ( self ) -> List[str]:
        return self.image_processor_tester.prepare_image_processor_dict()

    def __a ( self ) -> Union[str, Any]:
        # The processor must expose every configurable attribute.
        lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "do_center_crop" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "center_crop" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
        self.assertTrue(hasattr(_UpperCamelCase , "do_convert_rgb" ) )

    def __a ( self ) -> int:
        pass

    def __a ( self ) -> str:
        # Initialize image_processing
        lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )
        # Test not batched input
        lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 290 | 0 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def _lowerCAmelCase(a : List[Any] , a : str , a : Any , a : int ) -> Optional[int]:
    """Run one untrusted program in a worker process with a timeout and report
    whether it passed.

    NOTE(review): obfuscation gave all four parameters the name `a`
    (duplicate parameter names are a SyntaxError); the body's free names —
    check_program, timeout, task_id, completion_id — reveal the intended
    signature, and upstream human-eval's check_correctness matches it.
    Assignment targets were likewise collapsed to `_SCREAMING_SNAKE_CASE`
    (intended: manager, result, p). Confirm against the original.
    """
    _SCREAMING_SNAKE_CASE =multiprocessing.Manager()
    # Shared list the worker appends its verdict to.
    _SCREAMING_SNAKE_CASE =manager.list()
    _SCREAMING_SNAKE_CASE =multiprocessing.Process(target=a , args=(check_program, result, timeout) )
    p.start()
    # Give the worker one extra second beyond its own internal time limit.
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    # An empty result list means the worker never reported back.
    if not result:
        result.append('''timed out''' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def _lowerCAmelCase(a : List[str] , a : List[Any] , a : Optional[Any] ) -> Any:
    """Execute a candidate program inside a temp dir under safety guards and
    append 'passed' / 'timed out' / 'failed: ...' to the shared result list.

    NOTE(review): duplicate `a` parameters (a SyntaxError) — upstream
    human-eval names them (check_program, result, timeout). Assignment
    targets collapsed to `_SCREAMING_SNAKE_CASE` (intended: rmtree, rmdir,
    chdir, exec_globals). Confirm against the original.
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        # Save the real functions before reliability_guard() disables them.
        _SCREAMING_SNAKE_CASE =shutil.rmtree
        _SCREAMING_SNAKE_CASE =os.rmdir
        _SCREAMING_SNAKE_CASE =os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            _SCREAMING_SNAKE_CASE ={}
            with swallow_io():
                with time_limit(a ):
                    exec(a , a )
            result.append('''passed''' )
        except TimeoutException:
            result.append('''timed out''' )
        except BaseException as e:
            result.append(f"""failed: {e}""" )
        # Needed for cleaning up.
        _SCREAMING_SNAKE_CASE =rmtree
        _SCREAMING_SNAKE_CASE =rmdir
        _SCREAMING_SNAKE_CASE =chdir
@contextlib.contextmanager
def _lowerCAmelCase(a):
    """Context manager that raises TimeoutException after `a` seconds.

    Uses SIGALRM via a real-time interval timer, so it only works on the main
    thread of Unix platforms.

    Fixes from the obfuscated original: the inner handler declared both
    parameters with the same name (a SyntaxError), and `signal.signal` was
    registering the timeout value `a` instead of the handler function.
    """

    def signal_handler(signum, frame):
        raise TimeoutException('''Timed out!''')

    signal.setitimer(signal.ITIMER_REAL, a)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Always cancel the timer so it cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def _lowerCAmelCase():
    """Silence stdout/stderr into a write-only buffer and block stdin reads
    while the body runs.

    Fix: the original redirected into an undefined name `a`; the freshly
    created WriteOnlyStringIO instance is the intended target.
    """
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def _lowerCAmelCase():
    """Create a temporary directory, chdir into it, and yield its path; the
    directory (and cwd) are restored/removed on exit.

    Fix: the original called chdir(a) with `a` undefined; the tempdir path
    bound to `dirname` is the intended argument.
    """
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class __UpperCAmelCase(Exception):
    """Raised by the time-limit context manager when execution exceeds the
    allowed wall-clock budget.

    NOTE(review): the original base class `_lowerCamelCase` is undefined in
    this file; `Exception` matches upstream human-eval's TimeoutException —
    confirm.
    """

    pass
class __UpperCAmelCase(io.StringIO):
    """A StringIO that accepts writes but refuses every read.

    Fix: the original defined four methods all named `UpperCamelCase_`, so
    later defs silently overwrote earlier ones and none of the stdlib stream
    hooks were actually overridden; the intended io.StringIO method names
    (read / readline / readlines / readable) are restored.
    """

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        # Tell the io machinery this stream cannot be read from.
        return False
class __UpperCAmelCase(contextlib._RedirectStream):  # type: ignore
    """Context manager that redirects sys.stdin, mirroring
    contextlib.redirect_stdout / redirect_stderr.

    Fix: contextlib._RedirectStream reads the class attribute `_stream` to
    know which sys stream to swap; the original stored "stdin" under the
    name `lowercase`, which _RedirectStream never consults, making the
    redirect a no-op.
    """

    _stream = "stdin"
@contextlib.contextmanager
def _lowerCAmelCase(a):
    """Temporarily change the working directory to `a`; "." is a no-op.

    Fixes from the obfuscated original: the guard compared an undefined name
    `root` (the parameter is `a`), and the `finally` clause restored
    os.chdir(a) — the *new* directory — instead of the directory saved
    before the switch.
    """
    if a == ".":
        # Already there: nothing to change or restore.
        yield
        return
    cwd = os.getcwd()
    os.chdir(a)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        # Go back to where the caller was.
        os.chdir(cwd)
def _lowerCAmelCase(a : Optional[Any]=None ) -> Dict:
    """Disable functionality that could let untrusted code damage the host
    (destructive os/shutil/subprocess helpers, interpreter exit, etc.) and
    optionally cap the address-space limit.

    NOTE(review): the parameter is named `a` but the body reads
    `maximum_memory_bytes`, and every assignment target was collapsed to
    `_SCREAMING_SNAKE_CASE` — upstream human-eval assigns None to specific
    attributes (builtins.exit/quit, os.kill/remove/rmdir/..., shutil.rmtree,
    subprocess.Popen, sys.modules entries) and sets OMP_NUM_THREADS to "1".
    The specific targets cannot be recovered from this dump — confirm
    against the original.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        # RLIMIT_STACK is not adjustable this way on macOS.
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins

    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    import os

    # Intended upstream: os.environ["OMP_NUM_THREADS"] = "1".
    _SCREAMING_SNAKE_CASE ='''1'''
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    import shutil

    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    import subprocess

    _SCREAMING_SNAKE_CASE =None  # type: ignore
    _SCREAMING_SNAKE_CASE =None
    import sys

    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
    _SCREAMING_SNAKE_CASE =None
| 165 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import table for the wav2vec2 model family: maps submodule name ->
# exported symbol names, extended per available backend (torch / TF / flax).
# NOTE(review): the table is assigned to `UpperCAmelCase_` (re-bound four
# times below), yet the _LazyModule call at the bottom reads
# `_import_structure`, which is never defined in this dump; upstream each
# backend list is stored into `_import_structure["modeling_..."]`. Confirm
# against the original __init__.py.
UpperCAmelCase_ : int = {
    '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
    '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
    '''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
    '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch symbols, exposed only when torch is installed.
    UpperCAmelCase_ : Dict = [
        '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Wav2Vec2ForAudioFrameClassification''',
        '''Wav2Vec2ForCTC''',
        '''Wav2Vec2ForMaskedLM''',
        '''Wav2Vec2ForPreTraining''',
        '''Wav2Vec2ForSequenceClassification''',
        '''Wav2Vec2ForXVector''',
        '''Wav2Vec2Model''',
        '''Wav2Vec2PreTrainedModel''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow symbols, exposed only when TF is installed.
    UpperCAmelCase_ : int = [
        '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFWav2Vec2ForCTC''',
        '''TFWav2Vec2Model''',
        '''TFWav2Vec2PreTrainedModel''',
        '''TFWav2Vec2ForSequenceClassification''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax symbols, exposed only when flax is installed.
    UpperCAmelCase_ : str = [
        '''FlaxWav2Vec2ForCTC''',
        '''FlaxWav2Vec2ForPreTraining''',
        '''FlaxWav2Vec2Model''',
        '''FlaxWav2Vec2PreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports instead of the lazy module.
    from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
    from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
    from .processing_wavaveca import WavaVecaProcessor
    from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavaveca import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavaVecaForAudioFrameClassification,
            WavaVecaForCTC,
            WavaVecaForMaskedLM,
            WavaVecaForPreTraining,
            WavaVecaForSequenceClassification,
            WavaVecaForXVector,
            WavaVecaModel,
            WavaVecaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wavaveca import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWavaVecaForCTC,
            TFWavaVecaForSequenceClassification,
            TFWavaVecaModel,
            TFWavaVecaPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): Flax classes are imported from the TF module name
        # `.modeling_tf_wavaveca` — presumably this should be the flax
        # modeling module (upstream: modeling_flax_wav2vec2); confirm.
        from .modeling_tf_wavaveca import (
            FlaxWavaVecaForCTC,
            FlaxWavaVecaForPreTraining,
            FlaxWavaVecaModel,
            FlaxWavaVecaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 165 | 1 |
def UpperCamelCase__ ( UpperCAmelCase_: int ) -> int:
    """Return True if the given integer is even (its lowest bit is clear).

    Fix: the original body read an undefined name `number` instead of the
    parameter, so every call raised NameError.
    """
    # `&` binds tighter than `==`, so this is (n & 1) == 0.
    return UpperCAmelCase_ & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# True when the installed torch predates 1.11, whose ONNX export API dropped
# the `use_external_data_format` / `enable_onnx_checker` keyword arguments.
_lowerCAmelCase : bool = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def _A ( snake_case__ : Any , snake_case__ : tuple , snake_case__ : Path , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Any=False , ):
    """Export one submodel to ONNX, handling the pre- and post-1.11 torch.onnx
    export() signatures.

    NOTE(review): every parameter is named `snake_case__` (duplicate names
    are a SyntaxError) and the body reads undefined names (`output_path`,
    `is_torch_less_than_1_11`, input/output name lists) — upstream the
    signature is roughly (model, model_args, output_path,
    ordered_input_names, output_names, dynamic_axes, opset,
    use_external_data_format=False); confirm against the original script.
    """
    output_path.parent.mkdir(parents=snake_case__ , exist_ok=snake_case__ )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            snake_case__ , snake_case__ , f=output_path.as_posix() , input_names=snake_case__ , output_names=snake_case__ , dynamic_axes=snake_case__ , do_constant_folding=snake_case__ , use_external_data_format=snake_case__ , enable_onnx_checker=snake_case__ , opset_version=snake_case__ , )
    else:
        export(
            snake_case__ , snake_case__ , f=output_path.as_posix() , input_names=snake_case__ , output_names=snake_case__ , dynamic_axes=snake_case__ , do_constant_folding=snake_case__ , opset_version=snake_case__ , )
@torch.no_grad()
def _A ( snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : bool = False ):
    """Convert a Stable Diffusion checkpoint to an ONNX pipeline: exports the
    text encoder, UNet, VAE encoder/decoder, and (if present) the safety
    checker, then saves and reload-checks the assembled OnnxStableDiffusionPipeline.

    NOTE(review): all four parameters share the name `snake_case__` (a
    SyntaxError) and every local was renamed to `snake_case__`; the body's
    free names (fpaa = fp16 flag, pipeline, output_path, unet_path,
    vae_encoder/vae_decoder, safety checker pieces) reveal the intended
    locals — confirm against the upstream diffusers conversion script
    before executing.
    """
    snake_case__ : Any = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        snake_case__ : Optional[Any] = '''cuda'''
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        snake_case__ : List[str] = '''cpu'''
    snake_case__ : Dict = StableDiffusionPipeline.from_pretrained(snake_case__ , torch_dtype=snake_case__ ).to(snake_case__ )
    snake_case__ : List[str] = Path(snake_case__ )
    # TEXT ENCODER
    snake_case__ : Union[str, Any] = pipeline.text_encoder.config.max_position_embeddings
    snake_case__ : Tuple = pipeline.text_encoder.config.hidden_size
    snake_case__ : str = pipeline.tokenizer(
        '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='''pt''' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=snake_case__ , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
            '''input_ids''': {0: '''batch''', 1: '''sequence'''},
        } , opset=snake_case__ , )
    del pipeline.text_encoder
    # UNET
    snake_case__ : str = pipeline.unet.config.in_channels
    snake_case__ : Optional[Any] = pipeline.unet.config.sample_size
    snake_case__ : Union[str, Any] = output_path / '''unet''' / '''model.onnx'''
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , snake_case__ , snake_case__ , snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ),
            torch.randn(2 ).to(device=snake_case__ , dtype=snake_case__ ),
            torch.randn(2 , snake_case__ , snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ),
            False,
        ) , output_path=snake_case__ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
            '''timestep''': {0: '''batch'''},
            '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
        } , opset=snake_case__ , use_external_data_format=snake_case__ , )
    snake_case__ : str = str(unet_path.absolute().as_posix() )
    snake_case__ : Any = os.path.dirname(snake_case__ )
    snake_case__ : Optional[Any] = onnx.load(snake_case__ )
    # clean up existing tensor files
    shutil.rmtree(snake_case__ )
    os.mkdir(snake_case__ )
    # collate external tensor files into one
    onnx.save_model(
        snake_case__ , snake_case__ , save_as_external_data=snake_case__ , all_tensors_to_one_file=snake_case__ , location='''weights.pb''' , convert_attribute=snake_case__ , )
    del pipeline.unet
    # VAE ENCODER
    snake_case__ : Tuple = pipeline.vae
    snake_case__ : Union[str, Any] = vae_encoder.config.in_channels
    snake_case__ : List[str] = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    snake_case__ : List[str] = lambda snake_case__ , snake_case__ : vae_encoder.encode(snake_case__ , snake_case__ )[0].sample()
    onnx_export(
        snake_case__ , model_args=(
            torch.randn(1 , snake_case__ , snake_case__ , snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ),
            False,
        ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=snake_case__ , )
    # VAE DECODER
    snake_case__ : Dict = pipeline.vae
    snake_case__ : Optional[int] = vae_decoder.config.latent_channels
    snake_case__ : Optional[int] = vae_decoder.config.out_channels
    # forward only through the decoder part
    # NOTE(review): this binds vae_encoder.decode although `vae_decoder` was
    # just read above — presumably an obfuscation artifact; confirm.
    snake_case__ : Optional[Any] = vae_encoder.decode
    onnx_export(
        snake_case__ , model_args=(
            torch.randn(1 , snake_case__ , snake_case__ , snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=snake_case__ , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        snake_case__ : int = pipeline.safety_checker
        snake_case__ : str = safety_checker.config.vision_config.num_channels
        snake_case__ : Optional[Any] = safety_checker.config.vision_config.image_size
        snake_case__ : Optional[int] = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , snake_case__ , snake_case__ , snake_case__ , ).to(device=snake_case__ , dtype=snake_case__ ),
                torch.randn(1 , snake_case__ , snake_case__ , snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ),
            ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
                '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
                '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
            } , opset=snake_case__ , )
        del pipeline.safety_checker
        snake_case__ : Optional[int] = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
        snake_case__ : Dict = pipeline.feature_extractor
    else:
        snake_case__ : Optional[Any] = None
        snake_case__ : Any = None
    snake_case__ : Any = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=snake_case__ , feature_extractor=snake_case__ , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(snake_case__ )
    print('''ONNX pipeline saved to''' , snake_case__ )
    del pipeline
    del onnx_pipeline
    # Round-trip check: the saved ONNX pipeline must load back on CPU.
    snake_case__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(snake_case__ , provider='''CPUExecutionProvider''' )
    print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the converter.
    # NOTE(review): the parser and parsed args are assigned to
    # `_lowerCAmelCase` but read back as `parser`/`args`, and
    # `convert_models` / `args.fpaa` are undefined under these obfuscated
    # names — confirm against the original script.
    _lowerCAmelCase : str = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=1_4,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    _lowerCAmelCase : str = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 261 | 0 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Configure root logging once at import time for this fine-tuning script.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Module-level logger. NOTE(review): bound to `a_` by obfuscation but read
# later as `logger` — confirm the intended name.
a_ = logging.getLogger(__name__)
def a__(outputs, labels) -> int:
    """Count how many rows of `outputs` have their argmax equal to `labels`.

    Fixes from the obfuscated original: both parameters shared one name
    (a SyntaxError), and the argmax result was bound to a throwaway name so
    the raw logits — not the predicted classes — were compared to the labels.
    """
    predictions = np.argmax(outputs, axis=1)
    return np.sum(predictions == labels)
def a__(__lowercase) -> list:
    """Load a ROCStories CSV and return (story, ending_1, ending_2, label)
    tuples, with the label converted from 1-based to 0-based.

    Fixes from the obfuscated original: the accumulator list was bound to a
    throwaway name while the loop appended to the undefined `output`, and the
    `List[Any]` return annotation referenced an unimported typing name; the
    file handle is also closed deterministically via `with`.
    """
    with open(__lowercase, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            # Columns 1-4 are the story sentences, 5 and 6 the two candidate
            # endings, and the last column the 1-based correct-ending index.
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
    """Encode ROCStories splits into padded (input_ids, mc_token_ids,
    lm_labels, mc_labels) arrays, one tensor tuple per split.

    NOTE(review): all six parameters share the name `__lowercase`
    (duplicate parameter names are a SyntaxError) and every assignment
    target was collapsed to `_A`; the body's free names (encoded_datasets,
    input_len, cap_length, start_token, delimiter_token, clf_token,
    n_batch, with_conta, tensor_datasets, ...) reveal the intended locals —
    restore from the un-obfuscated original before executing.
    """
    _A = []
    for dataset in encoded_datasets:
        _A = len(__lowercase )
        # Per-split buffers: token ids, classifier-token positions, LM labels
        # (padded with -100 so padding is ignored by the loss), and labels.
        _A = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
        _A = np.zeros((n_batch, 2) , dtype=np.intaa )
        _A = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
        _A = np.zeros((n_batch,) , dtype=np.intaa )
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(__lowercase ):
            # Build "<start> story <delimiter> ending <classify>" for each of
            # the two candidate endings.
            _A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            _A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            _A = with_conta
            _A = with_conta
            _A = len(__lowercase ) - 1
            _A = len(__lowercase ) - 1
            _A = with_conta
            _A = with_conta
            _A = mc_label
        _A = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(__lowercase ) for t in all_inputs ) )
    return tensor_datasets
def a__ ( ) -> List[str]:
_A = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=__lowercase , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=__lowercase , default="" )
parser.add_argument("--eval_dataset" , type=__lowercase , default="" )
parser.add_argument("--seed" , type=__lowercase , default=42 )
parser.add_argument("--num_train_epochs" , type=__lowercase , default=3 )
parser.add_argument("--train_batch_size" , type=__lowercase , default=8 )
parser.add_argument("--eval_batch_size" , type=__lowercase , default=16 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=__lowercase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=__lowercase , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=__lowercase , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=__lowercase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=__lowercase , default=6.25E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=__lowercase , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=__lowercase , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=__lowercase , default=0.01 )
parser.add_argument("--lm_coef" , type=__lowercase , default=0.9 )
parser.add_argument("--n_valid" , type=__lowercase , default=374 )
parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." )
_A = parser.parse_args()
print(__lowercase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_A = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_A = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(__lowercase , __lowercase ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_A = ["_start_", "_delimiter_", "_classify_"]
_A = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__lowercase )
_A = tokenizer.convert_tokens_to_ids(__lowercase )
_A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__lowercase ) )
model.to(__lowercase )
# Load and encode the datasets
def tokenize_and_encode(__lowercase ):
if isinstance(__lowercase , __lowercase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowercase ) )
elif isinstance(__lowercase , __lowercase ):
return obj
return [tokenize_and_encode(__lowercase ) for o in obj]
logger.info("Encoding dataset..." )
_A = load_rocstories_dataset(args.train_dataset )
_A = load_rocstories_dataset(args.eval_dataset )
_A = (train_dataset, eval_dataset)
_A = tokenize_and_encode(__lowercase )
# Compute the max input length for the Transformer
_A = model.config.n_positions // 2 - 2
_A = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_A = min(__lowercase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_A = pre_process_datasets(__lowercase , __lowercase , __lowercase , *__lowercase )
_A , _A = tensor_datasets[0], tensor_datasets[1]
_A = TensorDataset(*__lowercase )
_A = RandomSampler(__lowercase )
_A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.train_batch_size )
_A = TensorDataset(*__lowercase )
_A = SequentialSampler(__lowercase )
_A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_A = args.max_steps
_A = args.max_steps // (len(__lowercase ) // args.gradient_accumulation_steps) + 1
else:
_A = len(__lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
_A = list(model.named_parameters() )
_A = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
_A = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
_A = AdamW(__lowercase , lr=args.learning_rate , eps=args.adam_epsilon )
_A = get_linear_schedule_with_warmup(
__lowercase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowercase )
if args.do_train:
_A , _A , _A = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
_A = 0
_A = 0
_A = tqdm(__lowercase , desc="Training" )
for step, batch in enumerate(__lowercase ):
_A = tuple(t.to(__lowercase ) for t in batch )
_A , _A , _A , _A = batch
_A = model(__lowercase , mc_token_ids=__lowercase , lm_labels=__lowercase , mc_labels=__lowercase )
_A = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_A = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_A = "Training loss: {:.2e} lr: {:.2e}".format(__lowercase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_A = model.module if hasattr(__lowercase , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_A = os.path.join(args.output_dir , __lowercase )
_A = os.path.join(args.output_dir , __lowercase )
torch.save(model_to_save.state_dict() , __lowercase )
model_to_save.config.to_json_file(__lowercase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_A = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__lowercase )
if args.do_eval:
model.eval()
_A , _A = 0, 0
_A , _A = 0, 0
for batch in tqdm(__lowercase , desc="Evaluating" ):
_A = tuple(t.to(__lowercase ) for t in batch )
_A , _A , _A , _A = batch
with torch.no_grad():
_A , _A , _A , _A = model(
__lowercase , mc_token_ids=__lowercase , lm_labels=__lowercase , mc_labels=__lowercase )
_A = mc_logits.detach().cpu().numpy()
_A = mc_labels.to("cpu" ).numpy()
_A = accuracy(__lowercase , __lowercase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_A = eval_loss / nb_eval_steps
_A = eval_accuracy / nb_eval_examples
_A = tr_loss / nb_tr_steps if args.do_train else None
_A = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
_A = os.path.join(args.output_dir , "eval_results.txt" )
with open(__lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , __lowercase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
# Script entry point: runs training and/or evaluation as configured by CLI flags.
if __name__ == "__main__":
    main()
"""simple docstring"""
import numpy as np
def a__(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation.

    Returns ``vector`` unchanged where it is positive and
    ``alpha * (exp(vector) - 1)`` elsewhere (elementwise).

    Args:
        vector: input array of any shape.
        alpha: scale applied to the negative branch.

    Returns:
        An array with the same shape as ``vector``.
    """
    # Bug fix: the original declared both parameters with the same name
    # (a SyntaxError) while the body referenced an undefined ``alpha``.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
# When run as a script, execute any doctests embedded in this module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name prefixes to their transformers (SEW) counterparts.
# "*" is a placeholder for the encoder layer index, substituted at load time.
# Bug fix: both module-level names were assigned to ``__magic_name__`` while the
# rest of the file references ``logger`` and ``MAPPING``.
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor ``value`` into the transformers model.

    ``key`` is a dotted attribute path (e.g. ``encoder.layers.3.attention.k_proj``)
    resolved starting at ``hf_pointer``.  ``weight_type`` selects which parameter
    of the resolved module is overwritten (``weight`` / ``weight_g`` /
    ``weight_v`` / ``bias``) or ``None`` to overwrite the tensor itself.
    ``full_name`` is the original fairseq name, used only in messages.

    Raises:
        AssertionError: if the target shape does not match ``value``.
    """
    # Bug fix: the original declared five identically-named parameters and every
    # branch below assigned a throwaway local instead of writing into the model.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of ``fairseq_model`` into ``hf_model``.

    Convolutional feature-extractor weights are routed through
    ``load_conv_layer``; everything else is matched against ``MAPPING``.
    Fairseq parameters that match nothing are collected and logged.
    """
    # Bug fix: the original assigned all locals to one clobbered name, so the
    # mapping loop could never resolve keys or weight types.
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned checkpoints nest the acoustic model under "sew."
                # (except the CTC head itself).
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def _A ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = full_name.split("""conv_layers.""" )[-1]
lowerCamelCase__ = name.split(""".""" )
lowerCamelCase__ = int(items[0] )
lowerCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowercase )
def convert_config(model, is_finetuned):
    """Build a ``SEWConfig`` mirroring the architecture of a loaded fairseq model.

    Args:
        model: the fairseq model (a CTC wrapper when ``is_finetuned``).
        is_finetuned: whether ``model`` is a fine-tuned CTC checkpoint.

    Returns:
        A populated ``SEWConfig``.
    """
    # Bug fix: the original assigned every config field to one clobbered local,
    # returning a default-initialized SEWConfig regardless of the checkpoint.
    config = SEWConfig()
    if is_finetuned:
        # Fine-tuned checkpoints wrap the acoustic model; reach through to the
        # underlying wav2vec-style encoder config.
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    # NOTE: fairseq stores the conv spec as a python-literal string; eval is
    # only safe because the checkpoint is trusted input.
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq SEW checkpoint to the transformers format.

    Loads the fairseq model, derives (or loads) a ``SEWConfig``, builds the
    matching feature extractor / tokenizer / processor, copies the weights and
    saves everything under ``pytorch_dump_folder_path``.
    """
    # Bug fix: the original assigned model, config, feature extractor,
    # tokenizer, processor and hf_model all to one clobbered local.
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
# CLI entry point for the conversion script.
# Bug fix: the original assigned the parser and parsed args to ``__magic_name__``
# while using them as ``parser`` / ``args`` (NameError at runtime).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 129 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a ``BeitImageProcessor`` in the
    tests below and exposes them as a kwargs dict.

    Bug fix: the class was defined under an obfuscated name while the test
    class instantiates ``BeitImageProcessingTester``; ``__init__`` declared all
    parameters with one duplicated name (a SyntaxError); the dict-building
    method is called as ``prepare_image_processor_dict``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Bug fix: defined under an obfuscated name while the tests call
    ``prepare_semantic_single_inputs()``.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    segmentation_map = Image.open(dataset[1]["file"])
    return image, segmentation_map
def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures.

    Bug fix: defined under an obfuscated name (shadowing the single-input
    helper) while the tests call ``prepare_semantic_batch_inputs()``; the
    obfuscated body also returned the first image/map twice instead of the
    distinct second pair.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``BeitImageProcessor``: config round-trips, batching of
    PIL / numpy / torch inputs, and segmentation-map handling.

    Bug fix: the base class was the undefined ``_SCREAMING_SNAKE_CASE`` (the
    mixin is imported as ``ImageProcessingSavingTestMixin``), every method
    shared one obfuscated name (so only the last definition survived), and
    several keyword arguments referenced an undefined placeholder name.
    """

    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With reduce_labels, background (0) becomes 255 and other labels shift down by one.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 129 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Return the color negative of ``img`` (per-channel ``255 - value``).

    Mutates ``img`` in place and also returns it.

    Bug fix: the function was defined under an obfuscated name while the
    script calls ``convert_to_negative``; the loops iterated
    ``range(<image>)`` instead of the image's height/width.
    """
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
# Bug fix: the original assigned the image to an obfuscated name while every
# subsequent line used ``img`` (NameError at runtime).
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (in place)
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    """Fast smoke tests for ``LDMPipeline`` built from tiny random sub-models.

    Bug fix: all three properties shared one obfuscated name (so only the
    last definition survived, breaking ``self.dummy_uncond_unet`` /
    ``self.dummy_vq_model``), and the test body referenced the undefined
    placeholder ``snake_case_`` for the config, device and kwargs.
    """

    @property
    def dummy_uncond_unet(self):
        # Tiny UNet so the test runs in seconds; seeded for determinism.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        # MPS needs a looser tolerance than CPU/CUDA.
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the published CompVis/ldm-celebahq-256 weights.

    Bug fix: this class shared its obfuscated name with the fast-test class
    above (shadowing it), and referenced the undefined placeholder
    ``snake_case_`` for the device and progress-bar kwargs.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        # MPS needs a looser tolerance than CPU/CUDA.
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 670 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): module-level logger handle. The name `__lowerCAmelCase` is
# unusual for a logger and is presumably meant to be `logger`; nothing in the
# visible portion of the file references it, so confirm against the rest of
# the file before renaming.
__lowerCAmelCase = logging.get_logger()
def _UpperCAmelCase ( hidden_sizes : int , name : str , config , save_directory , push_to_hub : bool = True ):
    """Convert one timm LeViT checkpoint into the HF architecture and, when
    ``push_to_hub`` is set, save model + image processor under
    ``save_directory / name``.

    Bug fix: the obfuscated signature declared all five parameters as ``__A``
    (duplicate parameter names are a SyntaxError) and the body referenced the
    undefined ``__SCREAMING_SNAKE_CASE``; names are restored from the body's
    own reads and the upstream LeViT conversion script -- confirm against the
    original file.
    """
    print(f'Converting {name}...' )
    with torch.no_grad():
        # Select the pretrained timm variant matching this hidden size.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s' , pretrained=True )
            else:
                from_model = timm.create_model('levit_128' , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192' , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256' , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384' , pretrained=True )
    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config ).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys() )
    new_keys = list(our_model.state_dict().keys() )
    print(len(og_keys ) , len(new_keys ) )
    # The two state dicts line up positionally, so copy weight i -> key i.
    for i in range(len(og_keys ) ):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights )
    # Sanity check: both models must produce identical logits on random input.
    x = torch.randn((2, 3, 224, 224) )
    original_logits = from_model(x )
    our_logits = our_model(x ).logits
    assert torch.allclose(original_logits , our_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( save_directory , model_name : str = None , push_to_hub : bool = True ):
    """Build LeViT configs for every known variant and convert either the
    requested ``model_name`` or all of them.

    Returns ``(config, expected_shape)`` for the last converted model.

    Bug fix: the obfuscated signature declared all three parameters as ``__A``
    (a SyntaxError) and the body used the undefined ``__SCREAMING_SNAKE_CASE``;
    names are restored from the body's reads and the upstream conversion
    script -- confirm against the original file.
    """
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    # Load the ImageNet-1k label map shipped as a HF dataset file.
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # Config factory pre-filled with the label maps.
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )
    names_to_hidden_sizes = {
        '''levit-128S''': 128,
        '''levit-128''': 128,
        '''levit-192''': 192,
        '''levit-256''': 256,
        '''levit-384''': 384,
    }
    names_to_config = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    # NOTE(review): ``convert_weight_and_push`` is not defined under that name
    # in this chunk (the converter above was renamed ``_UpperCAmelCase``);
    # confirm the intended callee against the original file.
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    # CLI: choose a LeViT variant (or all), convert it, and optionally push.
    # NOTE(review): the obfuscation rebound the parser to ``__lowerCAmelCase``,
    # so ``parser`` below is undefined as written; ``convert_weights_and_push``
    # is also not defined under that name in this chunk (the driver above was
    # renamed ``_UpperCAmelCase``).  Confirm against the original file.
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    __lowerCAmelCase = parser.parse_args()
    __lowerCAmelCase = args.pytorch_dump_folder_path
    # Make sure the dump directory exists before converting.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 466 |
import math
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Return the list of all primes below ``n`` using an odd-only sieve of
    Eratosthenes.  Requires ``n >= 3``.

    Bug fix: the obfuscated original collapsed the subscripted sieve
    assignments into rebindings of one local, leaving ``index``, ``is_prime``
    and ``primes`` undefined; the sieve is restored.
    """
    n = __SCREAMING_SNAKE_CASE
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Cross out multiples of every odd i, starting at 2*i (i itself is prime).
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    # Collect 2 plus every surviving odd number.
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = 9999_6666_3333 ):
    """Project Euler 234: return the sum of all semidivisible numbers not
    exceeding ``limit`` (numbers divisible by exactly one of lps(n), ups(n) --
    the largest prime <= sqrt(n) and the smallest prime >= sqrt(n)).

    Bug fix: the obfuscated original rebound every distinct local to
    ``lowercase`` (leaving ``matches_sum``, ``current``, ``last_prime`` ...
    undefined) and called a nonexistent module-level ``prime_sieve``; the
    sieve is inlined so the function is self-contained.
    """
    limit = __SCREAMING_SNAKE_CASE

    def _prime_sieve(n):
        # Odd-only sieve of Eratosthenes; inlined because both functions in
        # this file were renamed ``UpperCAmelCase_`` by the obfuscation, so no
        # ``prime_sieve`` exists at module level.
        is_prime = [True] * n
        is_prime[0] = False
        is_prime[1] = False
        is_prime[2] = True
        for i in range(3, int(n**0.5 + 1), 2):
            index = i * 2
            while index < n:
                is_prime[index] = False
                index += i
        primes = [2]
        for i in range(3, n, 2):
            if is_prime[i]:
                primes.append(i)
        return primes

    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = _prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this chunk -- the solver
    # above was renamed ``UpperCAmelCase_`` by the obfuscation; confirm the
    # intended callee against the original file.
    print(solution())
| 84 | 0 |
from __future__ import annotations
def _lowerCAmelCase ( number_of_bytes , partitions ):
    """Split ``number_of_bytes`` into ``partitions`` contiguous 1-indexed byte
    ranges, returned as ``"start-end"`` strings; the last partition absorbs
    any remainder so every byte is covered.

    Raises ``ValueError`` when ``partitions`` is non-positive or exceeds
    ``number_of_bytes``.

    Bug fix: the obfuscated signature declared both parameters as ``A__``
    (duplicate parameter names are a SyntaxError); names restored from the
    body's reads.
    """
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        # Last slot runs to number_of_bytes so the remainder is not lost.
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
    return allocation_list
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
    """Pure-Python SHA-1 (FIPS 180-4) over a ``bytes`` message.

    Bug fix: the obfuscated original defined every method under the single
    name ``UpperCAmelCase`` while the bodies call ``self.padding()``,
    ``self.split_blocks()``, ``self.expand_block()`` and ``self.rotate()`` --
    an ``AttributeError`` at runtime.  The method names the call sites expect
    are restored; the hashing logic itself is unchanged.
    """

    def __init__(self, data):
        # Raw message bytes plus the five 32-bit initial state words h0..h4.
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation of n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Append 0x80, zero-fill to 56 mod 64, then the 64-bit big-endian
        # bit length of the original message.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack(">Q", 8 * len(self.data))

    def split_blocks(self):
        # Slice the padded message into 64-byte compression blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand the 16 big-endian words of a block into the 80-word schedule.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the
        40-character hexadecimal digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round function f and constant k depend on the round quartile.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            # Fold the block's result back into the running state, mod 2^32.
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
    """Self-test: the pure-Python digest must match hashlib's SHA-1.

    Bug fixes: the obfuscated body referenced the undefined ``A__`` instead of
    the local message, and called the nonexistent ``hashlib.shaa`` (garbled
    ``hashlib.sha1``).
    """
    msg = b'Test String'
    # NOTE(review): ``SHAaHash`` is not defined anywhere in this chunk -- the
    # SHA-1 class above was renamed ``UpperCAmelCase__`` by the obfuscation;
    # confirm the intended class name against the original file.
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324
def _lowerCAmelCase ( ):
    """CLI entry point: hash ``--string`` (default) or the contents of
    ``--file`` and print the hex digest.

    Bug fix: the obfuscated body passed the undefined ``A__`` to ``bytes`` and
    the hasher instead of the local input value.
    """
    parser = argparse.ArgumentParser(description='Process some strings or files' )
    parser.add_argument(
        '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , 'utf-8' )
    # NOTE(review): ``SHAaHash`` is undefined in this chunk (the SHA-1 class
    # above was renamed ``UpperCAmelCase__``); confirm against the original.
    print(SHAaHash(hash_input ).final_hash() )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this chunk -- the CLI entry
    # point above was renamed ``_lowerCAmelCase`` by the obfuscation; confirm.
    main()
    import doctest

    doctest.testmod()
| 642 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def A (num: int, seed: int = 2, step: int = 1, attempts: int = 3):
    """Pollard's rho integer factorization.

    Returns a nontrivial divisor of ``num``, or ``None`` if none was found
    within ``attempts`` restarts.  Raises ``ValueError`` when ``num < 2``.

    Bug fix: the obfuscated signature declared all four parameters as
    ``__lowerCamelCase`` (duplicate parameter names are a SyntaxError) and
    collapsed the tortoise/hare updates into a single local; distinct names
    are restored.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho requires a pseudorandom function modulo ``num``; success
    # depends in part on the seed and the chosen polynomial, so
    # ``f(x) = (x**2 + step) % num`` lets us vary ``step`` between attempts.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
            # Once both enter a cycle of length p | num, their positions agree
            # modulo p, so their difference shares a divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            if divisor == num:
                # Trivial divisor -- restart with new parameters.
                break
            # The divisor is a nontrivial factor of ``num``!
            return divisor
        # This attempt failed: reseed deterministically from the hare's
        # position (Brent's variant) and perturb the polynomial.
        tortoise = hare
        seed = hare
        step += 1
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    import argparse

    # NOTE(review): the obfuscation rebound every distinct local to
    # ``_lowercase``, so ``parser``, ``args``, ``divisor`` and ``quotient``
    # below are undefined as written, and ``pollard_rho`` does not exist under
    # that name in this chunk (the factorizer above is called ``A``).
    # Confirm against the original file.
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        """num""",
        type=int,
        help="""The value to find a divisor of""",
    )
    parser.add_argument(
        """--attempts""",
        type=int,
        default=3,
        help="""The number of attempts before giving up""",
    )
    _lowercase = parser.parse_args()
    _lowercase = pollard_rho(args.num, attempts=args.attempts)
    # Report either "probably prime" or a factorization num = divisor * quotient.
    if divisor is None:
        print(F"""{args.num} is probably prime""")
    else:
        _lowercase = args.num // divisor
        print(F"""{args.num} = {divisor} * {quotient}""")
| 5 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 1 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ) -> Optional[Any]:
    """Under the CONNECTION_TIMES_OUT simulation, a plain request must raise
    the hang guard and a request with an explicit timeout must raise
    ``ConnectTimeout``.

    Bug fix: ``pytest.raises(__lowerCamelCase)`` referenced an undefined name;
    the otherwise-unused ``RequestWouldHangIndefinitelyError`` import above is
    the intended exception.
    """
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request('''GET''' , '''https://huggingface.co''' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ) -> Dict:
    """Under the CONNECTION_FAILS simulation, any request raises
    ``ConnectionError``."""
    failure_mode = OfflineSimulationMode.CONNECTION_FAILS
    with offline(failure_mode), pytest.raises(requests.exceptions.ConnectionError):
        requests.request("GET", "https://huggingface.co")
def _UpperCAmelCase ( ) -> str:
    # With HF_DATASETS_OFFLINE simulated as enabled, http_head must fail fast.
    # NOTE(review): ``__lowerCamelCase`` is undefined here -- the obfuscation
    # dropped the expected exception type (likely the library's offline-mode
    # error); confirm against the original test file.
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(__lowerCamelCase ):
            http_head('''https://huggingface.co''' )
| 430 |
"""simple docstring"""
import os
def _UpperCAmelCase ( __lowerCamelCase : list ) -> int:
    """Return the greatest product of four adjacent numbers in ``grid`` --
    vertically, horizontally, or along either diagonal (Project Euler 11).

    Bug fix: the obfuscated original rebound every local to ``_snake_case``,
    leaving ``largest``, the two diagonal accumulators and the per-cell
    products undefined; they are restored (diagonal products start at 0 so
    ``max`` is safe before either branch has fired).
    """
    grid = __lowerCamelCase
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def _UpperCAmelCase ( ) -> int:
    """Read the grid from ``grid.txt`` next to this module and return the
    largest product of four adjacent numbers.

    Bug fix: the original passed the undefined ``__lowerCamelCase`` to
    ``os.path.dirname`` (``__file__`` is the intended argument) and to the
    ``int`` conversion in the comprehension.
    """
    grid = []
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(cell ) for cell in row] for row in grid]
    # NOTE(review): ``largest_product`` is not defined under that name in this
    # chunk (the helper above was also renamed ``_UpperCAmelCase``); confirm
    # the intended callee against the original file.
    return largest_product(grid )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is undefined in this chunk -- the entry point
    # above was renamed ``_UpperCAmelCase`` by the obfuscation; confirm.
    print(solution())
| 430 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class __snake_case ( _lowercase):
    """Falcon model configuration (base class ``_lowercase`` is the obfuscated
    name of the library's ``PretrainedConfig`` import above).

    Bug fixes vs. the obfuscated original: ``__init__`` declared every
    parameter as ``__lowerCAmelCase`` (duplicate parameter names are a
    SyntaxError) while the body read ``vocab_size`` etc.; the two class
    attributes and the two properties each shared a single obfuscated name,
    so the later definition silently shadowed the earlier.  Names are
    restored from the attribute reads in the body -- confirm against the
    upstream config.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        """Store the hyper-parameters and forward the token ids to the base."""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Fall back to one KV head per attention head when none is given.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @property
    def head_dim(self):
        # Per-head embedding width.
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        # Rotary position embeddings are used exactly when ALiBi is not.
        return not self.alibi
| 83 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase_ = 5_0_0_0_0_0
lowercase_ , lowercase_ = os.path.split(__file__)
lowercase_ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( dataset : datasets.Dataset , **A__ : List[str] ) -> Optional[Any]:
    """Benchmark ``Dataset.map``: run it with the given kwargs and discard the
    result; the project's ``get_duration`` decorator measures elapsed time.

    Bug fix: the obfuscated signature declared both the positional and the
    ``**`` parameter as ``A__`` (a SyntaxError); the positional is renamed to
    ``dataset`` to match the body's read.
    """
    _lowercase = dataset.map(**A__ )
@get_duration
def a ( dataset : datasets.Dataset , **A__ : List[Any] ) -> List[str]:
    """Benchmark ``Dataset.filter``: run it with the given kwargs and discard
    the result; the project's ``get_duration`` decorator measures elapsed time.

    Bug fix: the obfuscated signature declared both the positional and the
    ``**`` parameter as ``A__`` (a SyntaxError); the positional is renamed to
    ``dataset`` to match the body's read.
    """
    _lowercase = dataset.filter(**A__ )
def a ( ) -> Union[str, Any]:
    """Drive the map/filter benchmarks over a generated dataset and dump the
    timings to the results JSON file.

    NOTE(review): the obfuscation rebound every distinct local to
    ``_lowercase`` and every argument to the undefined ``A__``, so as written
    ``dataset``, ``tokenizer`` and the benchmark callables are undefined, and
    the bare ``map``/``filter`` calls resolve to the builtins instead of the
    decorated benchmark functions above (which were both renamed ``a``).
    Confirm against the original benchmark script.
    """
    _lowercase ={'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        _lowercase =datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
        _lowercase =generate_example_dataset(
            os.path.join(A__ , 'dataset.arrow' ) , A__ , num_examples=A__ )
        _lowercase =transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=A__ )

        def tokenize(A__ : int ):
            return tokenizer(examples['text'] )

        # Time map/filter under each output format the benchmark covers.
        _lowercase =map(A__ )
        _lowercase =map(A__ , batched=A__ )
        _lowercase =map(A__ , function=lambda A__ : None , batched=A__ )
        with dataset.formatted_as(type='numpy' ):
            _lowercase =map(A__ , function=lambda A__ : None , batched=A__ )
        with dataset.formatted_as(type='pandas' ):
            _lowercase =map(A__ , function=lambda A__ : None , batched=A__ )
        with dataset.formatted_as(type='torch' , columns='numbers' ):
            _lowercase =map(A__ , function=lambda A__ : None , batched=A__ )
        with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
            _lowercase =map(A__ , function=lambda A__ : None , batched=A__ )
        _lowercase =map(A__ , function=A__ , batched=A__ )
        _lowercase =filter(A__ )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(A__ , 'wb' ) as f:
            f.write(json.dumps(A__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
    # NOTE(review): ``benchmark_map_filter`` is undefined in this chunk -- the
    # driver above was renamed ``a`` by the obfuscation; confirm.
    benchmark_map_filter()
| 291 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 9 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase__ (unittest.TestCase ):
    '''Holds the hyper-parameters the LeViT image-processor tests run with.'''

    # NOTE(review): the obfuscation renamed every ``__init__`` parameter to
    # ``_lowerCAmelCase`` (duplicate parameter names are a SyntaxError) and
    # collapsed the ``self.*`` attribute assignments into ``lowerCamelCase__``;
    # the intended attributes (parent, batch_size, num_channels, image_size,
    # min/max_resolution, do_resize, size, do_center_crop, crop_size,
    # do_normalize, image_mean, image_std) can be read off the right-hand
    # sides below.  Confirm against the original test file.
    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,):
        # Defaults: shortest-edge-18 resize and an 18x18 center crop.
        lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18}
        lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = min_resolution
        lowerCamelCase__ = max_resolution
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size
        lowerCamelCase__ = do_center_crop
        lowerCamelCase__ = crop_size
        lowerCamelCase__ = do_normalize
        lowerCamelCase__ = image_mean
        lowerCamelCase__ = image_std

    def UpperCamelCase_ ( self ):
        # Expose the settings in the kwargs dict shape the image processor takes.
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
# NOTE(review): the obfuscation renamed the mixin base class to ``a`` and the
# tester class above to ``UpperCamelCase__`` (so ``LevitImageProcessingTester``
# below is undefined as written), and gave every test method the same name
# ``UpperCamelCase_`` -- in Python the later definition shadows the earlier,
# so only the last method would survive.  Confirm against the original file.
@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''Tests for LevitImageProcessor: properties, size handling, and encoding
    of PIL, numpy, and torch image batches.'''

    _UpperCamelCase = LevitImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ):
        # setUp: build the hyper-parameter tester.
        lowerCamelCase__ = LevitImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ):
        # Kwargs dict used to construct the processor in every test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ):
        # The processor must expose all of its configuration attributes.
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )

    def UpperCamelCase_ ( self ):
        # from_dict honors the defaults and explicit size/crop_size overrides.
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )

    def UpperCamelCase_ ( self ):
        # Placeholder (mixin hook) -- intentionally empty.
        pass

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,Image.Image )
        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,np.ndarray )
        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )
        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
| 9 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : Optional[Any] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
lowerCamelCase__ : int = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
lowerCamelCase__ : Any = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__(datasets.Metric):
    """Wrapper metric around the official MAUVE implementation (mauve-text package)."""

    def _info(self):
        # Metric metadata: both inputs are plain strings (one prediction per reference).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/krishnap25/mauve',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/krishnap25/mauve'],
            reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        # Delegate everything to the reference implementation; predictions are the
        # "p" text distribution, references the "q" distribution.
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 33 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
# Module-level logger (transformers convention).
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Dict = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
SCREAMING_SNAKE_CASE : int = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Any = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
SCREAMING_SNAKE_CASE : Tuple = {F'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class UpperCamelCase(PreTrainedTokenizerFast):
    r"""Fast (Rust-backed) Funnel Transformer tokenizer, a WordPiece tokenizer whose
    CLS token carries a dedicated token-type id (``cls_token_type_id = 2``)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Token-type id used for the leading CLS token (Funnel-specific).
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        # Re-build the backend normalizer if its saved state disagrees with the
        # options requested here (lowercasing / accents / Chinese chars).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` from one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return token-type ids: ``cls_token_type_id`` for CLS, 0 for sequence A, 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend model files to *save_directory* and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 635 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Parse CLI options for the intel-extension-for-pytorch Stable Diffusion demo.
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: trace the UNet once with a representative sample input so
# ipex can specialize; fall back to optimizing without a sample on failure.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute (fixed seed for reproducibility)
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
| 336 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Per-device caps consumed by get_dataloaders/training_function below.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build tokenized GLUE/MRPC train and validation dataloaders.

    Args:
        accelerator: the ``Accelerator`` driving this run (used for process
            coordination and to pick padding behavior per backend).
        batch_size: training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    # NOTE(review): the eval batch size was scrambled in the original; EVAL_BATCH_SIZE
    # matches the canonical accelerate nlp_example -- confirm.
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == 'fp8'),
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Fine-tune bert-base-cased on GLUE/MRPC with HuggingFace Accelerate.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace providing ``cpu`` and ``mixed_precision``.
    """
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    """Parse CLI options and launch training with default MRPC hyper-parameters."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map from checkpoint name to its hosted config.json (transformers convention).
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for an ERNIE-M model (stores architecture hyper-parameters)."""

    model_type = "ernie_m"
    # Legacy attribute aliases accepted by PretrainedConfig.
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> public symbols, extended below
# only when the optional backends (vision / torch) are installed.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 186 | 0 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post *message_body* to the Slack incoming-webhook at *slack_url*.

    Raises:
        ValueError: if Slack responds with a non-200 status code.
    """
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 712 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used by the tokenizer test classes' setUp below.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow/fast Pegasus tokenizers (google/pegasus-large family)."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        # Slow and fast tokenizers must agree on mask/unk handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt'
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # Expected encoding for three reference sentences, padded to the longest (83 tokens).
        # fmt: off
        expected_encoding = {
            'input_ids': [
                [38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1],
                [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1] + [0] * 54,
                [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1] + [0] * 72,
            ],
            'attention_mask': [
                [1] * 83,
                [1] * 29 + [0] * 54,
                [1] * 11 + [0] * 72,
            ],
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='google/bigbird-pegasus-large-arxiv',
            revision='ba85d0851d708441f91440d509690f1ab6353415',
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the BigBird-Pegasus tokenizer variant (offset=0, [MASK] mask token).

    Renamed from ``A`` so it no longer shadows the previous test class of the same name.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        # Slow and fast tokenizers must agree on mask/unk handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt'
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_string = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_string).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 377 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> str:
    """Return the binary representation of an integer as a ``0b``-prefixed string.

    Mirrors ``bin()``: negative numbers get a ``-0b`` prefix, and floats or
    strings are rejected with a TypeError.
    """
    # The original compared the value against itself (isinstance(x, x)),
    # which raises the wrong error; check the actual types instead.
    if isinstance(_lowerCamelCase, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(_lowerCamelCase, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    num = _lowerCamelCase
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)  # most-significant bit ends up first
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ) -> None:
    """Convert a TensorFlow Funnel checkpoint to a PyTorch ``state_dict``.

    tf_checkpoint_path: path to the TensorFlow checkpoint.
    config_file: JSON file describing the model architecture.
    pytorch_dump_path: destination file for the converted weights.
    base_model: if truthy, build ``FunnelBaseModel`` (no decoder) instead of ``FunnelModel``.
    """
    # NOTE: the original definition repeated one parameter name four times
    # (a SyntaxError); the names above match the CLI flags parsed below.
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias: the __main__ block invokes this name.
convert_tf_checkpoint_to_pytorch = __UpperCAmelCase
if __name__ == "__main__":
    # NOTE: the parser/args were previously assigned to a different (shadowed)
    # name and then read as `parser`/`args`, which raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 105 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
    """Model-tester case for ``FlaxAutoencoderKL``.

    NOTE(review): both methods below carry the same mangled name
    ``UpperCAmelCase``, so the second definition shadows the first (the first
    was presumably the mixin's ``dummy_input`` property and the second its
    ``prepare_init_args_and_inputs_for_common``). The first method also
    assigns to ``a__`` but reads ``batch_size``/``num_channels``/``sizes``/
    ``prng_key``/``image`` — restore the original names before running.
    """

    __lowerCamelCase : Union[str, Any] = FlaxAutoencoderKL

    @property
    def UpperCAmelCase ( self : int ) -> List[Any]:
        """Build a random (batch, channels, 32, 32) sample and a PRNG key."""
        a__ : List[Any] = 4
        a__ : List[str] = 3
        a__ : List[str] = (32, 32)

        a__ : Optional[Any] = jax.random.PRNGKey(0 )
        a__ : Any = jax.random.uniform(a_ , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}

    def UpperCAmelCase ( self : List[str] ) -> int:
        """Return the VAE init kwargs plus the dummy inputs for the common tests."""
        a__ : Dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        a__ : List[Any] = self.dummy_input
        return init_dict, inputs_dict
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowercase__ ( ) -> None:
    """Regression test: ``kruskal`` must recover the known MST of a fixed 9-node graph."""
    # NOTE: the original body passed the undefined name `lowerCAmelCase__`
    # to kruskal(); the locals below restore the intended wiring.
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Edge order is implementation-defined, so compare as sorted lists.
    assert sorted(result) == sorted(expected)
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Strings that mark the start of a new top-level block; generated code is cut
# at the last occurrence of one of these markers.
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
a__ = EOF_STRINGS  # backward-compatible alias for the previous (mangled) name
class TokenizedDataset(IterableDataset):
    """Iterable dataset of tokenized HumanEval prompts.

    Each task's prompt is yielded ``n_copies`` times so that
    ``n_copies * batch_size`` candidate completions are generated per task.
    (The class and its base were mangled; the name matches the call site that
    constructs ``TokenizedDataset(...)`` further down the file.)
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        # Default to evaluating every task in the dataset.
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
        # NOTE(review): the padding argument was mangled; True (pad to the
        # longest prompt) matches how the batches are consumed below.
        outputs = self.tokenizer(prompts, padding=True, return_tensors='''pt''')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Stop generation once every sequence contains an EOF string past the prompt.

    (Class name and base were mangled; the name matches the call site that
    constructs ``EndOfFunctionCriteria(...)`` further down the file.)
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        # Prompt length in tokens — only text generated after it is inspected.
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True when all generated sequences contain an EOF string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string, eof_strings=None):
    """Cut ``string`` at the start of its last top-level block.

    ``eof_strings`` defaults to the module-level ``EOF_STRINGS`` markers;
    passing an explicit list is a backward-compatible generalization.
    (The function name matches the call site inside ``complete_code``.)
    """
    if eof_strings is None:
        eof_strings = EOF_STRINGS
    string_list = re.split('''(%s)''' % '''|'''.join(eof_strings), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate ``batch_size`` completions per dataloader batch, grouped by task.

    Returns a list of length ``n_tasks``; entry ``i`` holds the de-tokenized
    completions for task ``i``, each truncated at its last top-level block.
    (The original definition repeated one parameter name — a SyntaxError —
    and assigned results to throwaway locals; names restored below.)
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Restart the stopping criterion at the current prompt length so
            # only newly generated text is scanned for EOF strings.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Entry point: generate HumanEval completions with a causal LM and report pass@k.

    (The original body referenced the undefined mangled name ``__a``
    throughout; the wiring below follows the surviving call structure and
    restores the name expected by the ``__main__`` guard.)
    """
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = '''false'''

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''')
    code_eval_metric = load_metric('''code_eval''')

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['''test'''], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''], predictions=[['''''']])
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.'''
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
            references.append('''\n''' + test_func + '''\n''' + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, '''w''') as fp:
            json.dump(pass_at_k, fp)


# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
# NOTE(review): `main` must be defined above for this to run; the entry-point
# function in this file carries a mangled name, so confirm it is restored to
# `main` (or adjust this call site accordingly).
if __name__ == "__main__":
    main()
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node: a float payload plus optional left/right children.

    (The mangled original assigned all three annotations to a single field
    name, leaving the dataclass with one field; the names below match the
    ``node.data``/``node.left``/``node.right`` accesses and the ``TreeNode``
    annotations used throughout this file.)
    """

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def __UpperCAmelCase ( node : TreeNode | None ) -> bool:
    """Return True iff the tree rooted at ``node`` is a valid binary search tree.

    Raises ValueError when any node is not a ``TreeNode`` or its ``data``
    cannot be interpreted as a float. (The mangled original named every
    parameter ``__a`` while the bodies read ``node``, and called
    ``isinstance(node, node)``; both are repaired below.)
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        # Structural validation: every node must be a TreeNode with float-able data.
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        # Every node must lie strictly inside the (left_bound, right_bound) window.
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float('''inf'''), float('''inf'''))
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 14 | 1 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
# NOTE(review): three different fixture paths are assigned to the same mangled
# name, so only the last assignment survives at runtime. These were presumably
# distinct constants (sample processor config, sample vocab, fixtures dir) —
# restore distinct names before relying on them.
__UpperCAmelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__UpperCAmelCase = get_tests_dir("fixtures/vocab.json")
__UpperCAmelCase = get_tests_dir("fixtures")
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Functional tests for the ``AutoProcessor.from_pretrained`` resolution paths.

    NOTE(review): identifiers in this class look machine-mangled — results are
    assigned to the throwaway name ``snake_case`` while later statements read
    the intended names (``processor``, ``tokenizer``, ``config_dict``, ...),
    and ``SCREAMING_SNAKE_CASE__`` stands in for many unrelated (and here
    undefined) values such as fixture paths, expected classes and boolean
    flags. Restore the original identifiers before running this suite.
    """

    # Minimal vocabulary used by the custom-tokenizer fixtures below.
    __UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def _UpperCamelCase ( self ):
        """Per-test setup (the original body is mangled to a bare counter reset)."""
        snake_case: Union[str, Any] = 0

    def _UpperCamelCase ( self ):
        """AutoProcessor resolves a Wav2Vec2 checkpoint to its processor class."""
        snake_case: Any = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Round-trip: save a model config + processor to disk and reload via AutoProcessor."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case: Optional[int] = WavaVecaConfig()
            snake_case: List[str] = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )

            # save in new folder
            model_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
            processor.save_pretrained(SCREAMING_SNAKE_CASE__ )

            snake_case: Optional[int] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )

            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """AutoProcessor builds a processor from local config + vocab files."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
            copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , 'vocab.json' ) )

            snake_case: List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )

            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Processor still loads when `processor_class` is dropped from the tokenizer config."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case: Optional[int] = WavaVecaFeatureExtractor()
            snake_case: int = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )

            snake_case: int = WavaVecaProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

            # save in new folder
            processor.save_pretrained(SCREAMING_SNAKE_CASE__ )

            # drop `processor_class` in tokenizer
            with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 'r' ) as f:
                snake_case: List[str] = json.load(SCREAMING_SNAKE_CASE__ )
            config_dict.pop('processor_class' )

            with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 'w' ) as f:
                f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )

            snake_case: Any = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )

            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Processor still loads when `processor_class` is dropped from the feature-extractor config."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case: Union[str, Any] = WavaVecaFeatureExtractor()
            snake_case: Optional[Any] = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )

            snake_case: List[Any] = WavaVecaProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

            # save in new folder
            processor.save_pretrained(SCREAMING_SNAKE_CASE__ )

            # drop `processor_class` in feature extractor
            with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 'r' ) as f:
                snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
            config_dict.pop('processor_class' )

            with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 'w' ) as f:
                f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )

            snake_case: int = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )

            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Processor resolves via the model config's `processor_class` when only a vocab is present."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case: Optional[int] = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
            model_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
            # copy relevant files
            copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , 'vocab.json' ) )
            # create emtpy sample processor
            with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 'w' ) as f:
                f.write('{}' )

            snake_case: Tuple = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )

            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Loading a remote custom processor requires `trust_remote_code=True`."""
        with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
            snake_case: List[str] = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
            snake_case: List[Any] = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=SCREAMING_SNAKE_CASE__ )

        snake_case: Union[str, Any] = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )

        snake_case: List[str] = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )

        snake_case: int = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )

            # Test we can also load the slow version
            snake_case: Dict = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
            snake_case: Tuple = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )

    def _UpperCamelCase ( self ):
        """Custom config/feature-extractor/tokenizer/processor classes register with the Auto API."""
        try:
            AutoConfig.register('custom' , SCREAMING_SNAKE_CASE__ )
            AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
            AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
                AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

            # Now that the config is registered, it can be used as any other config with the auto-API
            snake_case: str = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                snake_case: str = os.path.join(SCREAMING_SNAKE_CASE__ , 'vocab.txt' )
                with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as vocab_writer:
                    vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
                snake_case: Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE__ )

            snake_case: Optional[int] = CustomProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
                snake_case: List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
                self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

        finally:
            # Always deregister the custom classes so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def _UpperCamelCase ( self ):
        """Locally registered classes win over remote code unless `trust_remote_code=True`."""
        class SCREAMING_SNAKE_CASE ( snake_case ):
            '''Local stand-in feature extractor (remote-code marker off).'''
            __UpperCamelCase = False

        class SCREAMING_SNAKE_CASE ( snake_case ):
            '''Local stand-in tokenizer (remote-code marker off).'''
            __UpperCamelCase = False

        class SCREAMING_SNAKE_CASE ( snake_case ):
            '''Local stand-in processor wiring the two classes above.'''
            __UpperCamelCase = "AutoFeatureExtractor"
            __UpperCamelCase = "AutoTokenizer"
            __UpperCamelCase = False

        try:
            AutoConfig.register('custom' , SCREAMING_SNAKE_CASE__ )
            AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
            AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            # If remote code is not set, the default is to use local classes.
            snake_case: Optional[Any] = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local ones.
            snake_case: Any = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub.
            snake_case: Dict = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )

        finally:
            # Always deregister the custom classes so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def _UpperCamelCase ( self ):
        """Falls back to the tokenizer class when the model has no processor."""
        snake_case: List[Any] = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
        self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )

    def _UpperCamelCase ( self ):
        """Falls back to the image-processor class when the model has no processor."""
        snake_case: int = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
        self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Staging-endpoint tests: pushing processors to the Hugging Face Hub.

    NOTE(review): same identifier mangling as the class above — results are
    assigned to the throwaway name ``snake_case`` while later statements read
    the intended names, and ``SCREAMING_SNAKE_CASE__`` stands in for many
    unrelated (and here undefined) values. Restore before running.
    """

    # Minimal vocabulary used by the custom-tokenizer fixture below.
    __UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def _UpperCamelCase ( cls ):
        """Store the staging auth token for every test in this class."""
        snake_case: List[str] = TOKEN
        HfFolder.save_token(SCREAMING_SNAKE_CASE__ )

    @classmethod
    def _UpperCamelCase ( cls ):
        """Best-effort cleanup of the repos created by these tests."""
        try:
            delete_repo(token=cls._token , repo_id='test-processor' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
        except HTTPError:
            pass

    def _UpperCamelCase ( self ):
        """Push a processor to a user-namespace repo and reload it unchanged."""
        snake_case: List[str] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(SCREAMING_SNAKE_CASE__ , 'test-processor' ) , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )

            snake_case: Optional[Any] = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def _UpperCamelCase ( self ):
        """Push a processor to an organization repo and reload it unchanged."""
        snake_case: Optional[Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(SCREAMING_SNAKE_CASE__ , 'test-processor-org' ) , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token , organization='valid_org' , )

            snake_case: Optional[int] = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def _UpperCamelCase ( self ):
        """Push a custom (dynamic-code) processor and verify the auto_map metadata and copied modules."""
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        snake_case: Optional[int] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case: Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , 'vocab.txt' )
            with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            snake_case: List[Any] = CustomTokenizer(SCREAMING_SNAKE_CASE__ )

        snake_case: Any = CustomProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
            snake_case: Optional[int] = Repository(SCREAMING_SNAKE_CASE__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
            processor.save_pretrained(SCREAMING_SNAKE_CASE__ )

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
                    'AutoProcessor': 'custom_processing.CustomProcessor',
                } , )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) ) as f:
                snake_case: List[Any] = json.load(SCREAMING_SNAKE_CASE__ )
            self.assertDictEqual(
                tokenizer_config['auto_map'] , {
                    'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
                    'AutoProcessor': 'custom_processing.CustomProcessor',
                } , )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , 'custom_feature_extraction.py' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , 'custom_tokenization.py' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , 'custom_processing.py' ) ) )

            repo.push_to_hub()

        snake_case: Dict = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=SCREAMING_SNAKE_CASE__ )
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map of fairseq wav2vec2 parameter prefixes to their HF Wav2Vec2 counterparts.
# "*" is replaced with the encoder-layer index when weights are loaded.
# NOTE: these three module constants were previously all assigned to one
# shadowed name, while the loader functions below read `logger` and `MAPPING`.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

# Parameters that live at the top level of the HF model (no nested prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` into ``hf_pointer`` at the dotted path ``key``.

    hf_pointer: HF module to write into (navigated attribute by attribute).
    key: dotted attribute path, e.g. ``encoder.layers.0.attention.k_proj``.
    value: tensor copied into the target parameter.
    full_name: original fairseq parameter name (for logging/assertions).
    weight_type: one of "weight", "weight_g", "weight_v", "bias" or None.

    (The original definition repeated one parameter name five times — a
    SyntaxError — and assigned each branch to a throwaway local instead of
    the module parameter; both are repaired below.)
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Port all fairseq wav2vec2 encoder weights into `hf_model`.

    Returns the fairseq encoder->decoder projection layer (`proj`) so the
    caller can attach it to the SpeechEncoderDecoderModel, or None if absent.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched key in the name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv/layer-norm tensor into the HF model.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>...."; type_id 0
    is the conv itself, type_id 2 a layer norm (only present on layer 0 when
    group norm is used). Anything else is recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear whose weight shares data with embedding `emb`.

    Used to turn a decoder token-embedding matrix into an output (lm_head)
    projection with tied weights.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the underlying tensor rather than copying, so the weights stay tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Read a fairseq dict file and return a token -> id vocabulary mapping.

    Each line of the dict file is "<token> <count>"; only the token is kept.
    The four special tokens get the first four ids, real tokens start at 4.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2+speech2text2 checkpoint into an HF
    SpeechEncoderDecoderModel and save model, tokenizer and feature extractor
    to `pytorch_dump_folder_path`.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer (encoder dim may differ from decoder dim)
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: parse paths/sizes and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision encoder + small transformer mapper that turns an example
    image into conditioning embeddings for the Paint-by-Example pipeline."""

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling (learned embedding used for classifier-free guidance)
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a sequence dimension before running the mapper blocks.
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    """Stack of BasicTransformerBlocks mapping pooled CLIP features into the
    conditioning space used by the Paint-by-Example UNet."""

    def __init__(self, config):
        super().__init__()
        # One block per ~5 hidden layers of the underlying CLIP vision model.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 10 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File name the tokenizer expects/saves for its SentencePiece model.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download locations of the SentencePiece models for the canonical checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

# Maximum input length (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """
    Construct an ALBERT tokenizer backed by SentencePiece.

    Args:
        vocab_file: path to the SentencePiece model file.
        do_lower_case: lowercase the input during preprocessing.
        remove_space: collapse/strip whitespace during preprocessing.
        keep_accents: keep accents instead of NFKD-stripping combining marks.
        bos_token/eos_token/unk_token/sep_token/pad_token/cls_token/mask_token:
            special tokens, matching the original ALBERT conventions.
        sp_model_kwargs: extra kwargs forwarded to SentencePieceProcessor.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word: include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before tokenizing."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, splitting digit/comma pieces like "9,9"."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # Re-tokenize the digits without the trailing comma, then re-append it.
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into a string, decoding around special tokens."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 622 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """In place, drop fairseq bookkeeping entries that have no HF counterpart.

    Missing keys are ignored (pop with default) so partially-converted state
    dicts do not raise.
    """
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear whose weight shares data with embedding `emb`
    (used to tie the lm_head to the token embeddings)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and convert it to an HF XGLMForCausalLM."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq names everything "decoder.*"; HF uses "model.*".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    # Rebuild the output projection tied to the input embeddings.
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    # Command-line entry point: convert a local fairseq checkpoint and save it.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 655 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        """Mirror of the Win32 CONSOLE_CURSOR_INFO struct (cursor size + visibility)."""

        # ctypes requires the struct layout under the name `_fields_`.
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """Hide the terminal cursor (Win32 console API on Windows, ANSI escape on POSIX)."""
    if os.name == "nt":
        cursor_info = CursorInfo()
        # -11 is the Win32 STD_OUTPUT_HANDLE constant.
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    """Show the terminal cursor (Win32 console API on Windows, ANSI escape on POSIX)."""
    if os.name == "nt":
        cursor_info = CursorInfo()
        # -11 is the Win32 STD_OUTPUT_HANDLE constant.
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager that hides the terminal cursor and always restores it,
    even if the body raises."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 655 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make the pipeline tests below reproducible across runs.
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast tests for StableUnCLIPPipeline built from tiny dummy components."""

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False

    def get_dummy_components(self):
        """Build a minimal, seeded set of pipeline components for fast CPU tests."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # class embedding is the concat of noise-level + image embeddings,
            # hence twice the projection dim.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic minimal pipeline inputs for the given device/seed."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # exact output comparison is only reliable on CPU
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the fusing/stable-unclip-2-1-l checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 456 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    """SageMaker multi-node distributed-training smoke tests (one config per
    parameterized entry)."""

    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 456 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase__ : int = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def lowercase_(bort_checkpoint_path, pytorch_dump_folder_path):
    """
    Convert an original Bort checkpoint (MXNet / GluonNLP) to the HuggingFace
    BERT structure, save it to `pytorch_dump_folder_path`, and verify that the
    converted model reproduces the original model's output tensors.

    Args:
        bort_checkpoint_path: path to the official Bort ``.params`` file.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # Original Bort configuration (the released bort_4_8_768_1024 variant).
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first.
    # It's the same as RoBERTa's, so RobertaTokenizer can be used later.
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to GluonNLP's vocab cache.
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (GluonNLP name -> Transformers attribute), e.g.
    #   encoder.layer_norm.beta                  -> bert.embeddings.LayerNorm.bias
    #   word_embed.0.weight                      -> bert.embeddings.word_embeddings.weight
    #   encoder.transformer_cells.*.proj.weight  -> bert.encoder.layer.*.output.dense.weight
    # The per-layer mapping is applied explicitly in the loop below.

    def to_torch(mx_array) -> nn.Parameter:
        # Convert an MXNet NDArray parameter into a torch Parameter.
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    def check_and_map_params(hf_param, gluon_param):
        # Look up the Gluon parameter, sanity-check shapes, and return it as a
        # torch Parameter ready to be assigned onto the HF model.
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"
    input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


# Backward-compatible public alias matching the name used at the CLI entry point.
convert_bort_checkpoint_to_pytorch = lowercase_
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # `lowercase_` is the conversion routine defined above.
    lowercase_(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 721 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# NOTE(review): both assignments below bind the same name, so the archive map
# shadows the logger — presumably these were a module `logger` and the DETR
# pretrained-config archive map originally; confirm before relying on either.
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowerCAmelCase_(PretrainedConfig):
    """
    Configuration for the DETR (DEtection TRansformer) model.

    Stores the backbone choice, the encoder/decoder transformer sizes, the
    Hungarian-matcher costs and the individual loss coefficients.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic PretrainedConfig attribute names -> DETR-specific names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a plain Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR (named distinctly so it does not shadow the model config class above)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the exported graph inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 545 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keras constructor for each EfficientNet variant (referenced by
# `convert_efficientnet_checkpoint` below).
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyper-parameters (referenced by
# `get_efficientnet_config` and the verification preprocessing).
CONFIG_MAP = {
    "b0": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": []},
    "b1": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16]},
    "b2": {"hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16]},
    "b3": {"hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18]},
    "b4": {"hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6]},
    "b5": {"hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27]},
    "b6": {"hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31]},
    "b7": {"hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18]},
}
def get_efficientnet_config(model_name):
    """Build an `EfficientNetConfig` for `model_name` ("b0" ... "b7")."""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    # ImageNet-1k label mapping fetched from the hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download the standard COCO test image used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """Create the image processor matching `model_name`'s input resolution."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def a (lowerCAmelCase__ ):
__a = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
__a = sorted(set(lowerCAmelCase__ ) )
__a = len(lowerCAmelCase__ )
__a = {b: str(lowerCAmelCase__ ) for b, i in zip(lowerCAmelCase__ , range(lowerCAmelCase__ ) )}
__a = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
__a = {}
for item in rename_keys:
if item[0] in original_param_names:
__a = """efficientnet.""" + item[1]
__a = """classifier.weight"""
__a = """classifier.bias"""
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """
    Copy every mapped TF parameter (numpy array) into the HF state dict
    in place, permuting convolution kernels to PyTorch layout first.
    """
    for key, value in tf_params.items():
        # Keras normalization layers have no HF counterpart here.
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernel: HWIO -> OIHW
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # Depthwise kernel: HWIM -> IMHW
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # Dense kernel: transpose to (out, in)
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy the weights of the original Keras EfficientNet `model_name` into the
    HuggingFace EfficientNet structure, verify both models produce the same
    logits on a sample image, and optionally save and/or push the result.
    """
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1_000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='b0',
        type=str,
        help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='hf_model',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--save_model', action='store_true', help='Save model to local')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 99 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
# Attributes read by TokenizerTesterMixin's common tests.
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
    """Build a tokenizer from the SentencePiece fixture and cache it on disk."""
    super().setUp()

    # We have a SentencePiece fixture for testing
    sample_vocab = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
    tokenizer = PegasusTokenizer(sample_vocab)
    tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _large_tokenizer(self):
    # Full-size pretrained tokenizer used by the `test_large_*` tests.
    return PegasusTokenizer.from_pretrained('google/pegasus-large')
def get_tokenizer(self, **kwargs):
    # Reload the fixture tokenizer saved to disk by setUp().
    return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
    # Input/expected-output pair consumed by the common tokenizer tests.
    return ("This is a test", "This is a test")
def test_convert_token_and_id(self):
    """`</s>` must round-trip with id 1."""
    token = '</s>'
    token_id = 1

    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
    """Spot-check the first, second and last entries of the fixture vocab."""
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())

    self.assertEqual(vocab_keys[0], '<pad>')
    self.assertEqual(vocab_keys[1], '</s>')
    self.assertEqual(vocab_keys[-1], 'v')
    self.assertEqual(len(vocab_keys), 1103)
def test_vocab_size(self):
    # The fixture SentencePiece model yields exactly 1103 vocab entries.
    self.assertEqual(self.get_tokenizer().vocab_size, 1103)
def test_mask_tokens_rust_pegasus(self):
    """Fast and slow tokenizers must agree on Pegasus special/mask tokens."""
    rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
    py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
    raw_input_str = (
        'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
        ' </s> <pad> <pad> <pad>'
    )
    rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
    py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
    self.assertListEqual(rust_ids, py_ids)
def test_large_mask_tokens(self):
    """Mask tokens must encode to their reserved ids on the large tokenizer."""
    tokenizer = self._large_tokenizer
    # <mask_1> masks whole sentence while <mask_2> masks single word
    raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
    desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
    ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
    self.assertListEqual(desired_result, ids)
def test_large_tokenizer_settings(self):
    """Pin the vocab size, special-token ids and offset of google/pegasus-large."""
    tokenizer = self._large_tokenizer
    # The tracebacks for the following asserts are **better** without messages or self.assertEqual
    assert tokenizer.vocab_size == 9_6103
    assert tokenizer.pad_token_id == 0
    assert tokenizer.eos_token_id == 1
    assert tokenizer.offset == 103
    assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
    assert tokenizer.unk_token == "<unk>"
    assert tokenizer.model_max_length == 1024
    raw_input_str = 'To ensure a smooth flow of bank resolutions.'
    desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
    ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
    self.assertListEqual(desired_result, ids)
    assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def test_large_seq2seq_truncation(self):
    """Source/target batches must pad/truncate to the expected tensor shapes."""
    src_texts = ['This is going to be way too long.' * 150, 'short example']
    tgt_texts = ['not super long but more than 5 tokens', 'tiny']
    batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
    targets = self._large_tokenizer(
        text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt'
    )

    assert batch.input_ids.shape == (2, 1024)
    assert batch.attention_mask.shape == (2, 1024)
    assert targets["input_ids"].shape == (2, 5)
    assert len(batch) == 2  # input_ids, attention_mask.
@slow
def lowerCAmelCase__ ( self ):
    """Integration test: compare encodings against a pinned model revision."""
    # fmt: off
    # Golden encoding captured from google/bigbird-pegasus-large-arxiv.
    __magic_name__ = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=UpperCamelCase_ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
    """Tokenizer tests for the BigBird-Pegasus variant (offset=0, [MASK] token)."""
    # NOTE(review): method bodies below contain unresolved obfuscated names
    # (`UpperCamelCase_` used as argument values, `tokenizer`, `batch`,
    # `targets`, `rust_tokenizer`, `py_tokenizer`) — the renamer rewrote
    # assignment targets but not the read sites.
    _lowerCamelCase = PegasusTokenizer
    _lowerCamelCase = PegasusTokenizerFast
    _lowerCamelCase = True
    _lowerCamelCase = True
    def lowerCAmelCase__ ( self ):
        """Save a SentencePiece-fixture tokenizer into tmpdirname for the tests."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        __magic_name__ = PegasusTokenizer(UpperCamelCase_ , offset=0 , mask_token_sent=UpperCamelCase_ , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def lowerCAmelCase__ ( self ):
        # Full-size pretrained tokenizer used by the slow/integration tests.
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
        # Tokenizer factory backed by the fixture saved in setUp.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        # (input, expected-output) pair used by common tokenizer tests.
        return ("This is a test", "This is a test")
    def lowerCAmelCase__ ( self ):
        """Rust and Python tokenizers must agree on special-token handling."""
        __magic_name__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __magic_name__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __magic_name__ = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        __magic_name__ = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ).input_ids[0]
        __magic_name__ = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ).input_ids[0]
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
    @require_torch
    def lowerCAmelCase__ ( self ):
        """Batch encoding pads/truncates to the 4096-token model max length."""
        __magic_name__ = ['''This is going to be way too long.''' * 1000, '''short example''']
        __magic_name__ = ['''not super long but more than 5 tokens''', '''tiny''']
        __magic_name__ = self._large_tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors='''pt''' )
        __magic_name__ = self._large_tokenizer(
            text_target=UpperCamelCase_ , max_length=5 , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(UpperCamelCase_ ) == 2 # input_ids, attention_mask.
    def lowerCAmelCase__ ( self ):
        """Pin token ids against the original TF implementation's output."""
        __magic_name__ = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        __magic_name__ = self._large_tokenizer(UpperCamelCase_ ).input_ids
        self.assertListEqual(
            UpperCamelCase_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 490 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __lowerCamelCase(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
) -> jnp.ndarray:
    """Return sinusoidal time-step embeddings (Transformer-style).

    Args:
        timesteps: 1-D array of timestep values, one per batch element.
        embedding_dim: output embedding width; must be even (half sin, half cos).
        freq_shift: subtracted from the timescale count when computing the
            log-frequency increment.
        min_timescale / max_timescale: bounds of the geometric frequency range.
        flip_sin_to_cos: if True emit [cos, sin] instead of [sin, cos].
        scale: multiplier applied to the phase before sin/cos.

    Returns:
        Array of shape (len(timesteps), embedding_dim).

    Fixes vs. the obfuscated original: the signature declared every parameter
    as `_lowerCAmelCase` (a SyntaxError — duplicate argument names) and used
    the non-existent dtype `jnp.floataa`.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    emb = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(emb), jnp.sin(emb)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(emb), jnp.cos(emb)], axis=1)
    return jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])


# Public alias: the FlaxTimesteps-style module below calls this name, which the
# obfuscated original never bound.
get_sinusoidal_embeddings = __lowerCamelCase
class __SCREAMING_SNAKE_CASE(nn.Module):
    """Time-embedding MLP: two Dense layers with a SiLU in between (Flax).

    Fixes vs. the obfuscated original: both class attributes were declared as
    `__SCREAMING_SNAKE_CASE` (the second shadowing the first), so the
    `self.time_embed_dim` / `self.dtype` reads in `__call__` could never
    resolve, and the dtype used the non-existent `jnp.floataa`.
    """

    # Width of both projection layers.
    time_embed_dim: int = 32
    # Parameter/computation dtype for the Dense layers.
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class __SCREAMING_SNAKE_CASE(nn.Module):
    """Parameter-free Flax module producing sinusoidal time-step embeddings.

    Fixes vs. the obfuscated original: all three class attributes were
    declared as `__SCREAMING_SNAKE_CASE`, so `self.dim`,
    `self.flip_sin_to_cos` and `self.freq_shift` (read in `__call__`) were
    never defined.
    """

    # Output embedding width.
    dim: int = 32
    # Emit [cos, sin] instead of [sin, cos] when True.
    flip_sin_to_cos: bool = False
    # Frequency shift forwarded to the embedding helper.
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 129 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public-symbol table consumed by `_LazyModule` below.
# (The obfuscated original named this `__lowerCAmelCase` while the
# `_LazyModule(...)` call still referenced `_import_structure` — NameError.)
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: simply omit the modeling symbols.
    pass
else:
    # Register the torch-only modeling module (the obfuscated original bound
    # this list to a throwaway variable instead of extending the table).
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 129 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __magic_name__ :
    """Model tester harness for GPTNeoXJapanese: builds configs/inputs and runs
    shape checks for the base model, decoder mode and causal-LM head.

    NOTE(review): the obfuscation renamed every method parameter to
    `UpperCamelCase__` — the multi-parameter signatures below therefore
    contain duplicate argument names, which is a SyntaxError in Python; the
    bodies also read original names (`parent`, `batch_size`, ...) that are no
    longer bound. Code is preserved byte-for-byte pending reconstruction.
    """
    def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Dict=99 , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=5_12 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : int=4 , UpperCamelCase__ : int=None , ) -> Optional[Any]:
        '''simple docstring'''
        # Store every hyper-parameter on the tester instance.
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_multiple_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout
        UpperCAmelCase = attention_dropout
        UpperCAmelCase = weight_tying
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
        '''simple docstring'''
        # Build random input ids, optional attention mask and labels.
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        UpperCAmelCase = self.get_config()
        return config, input_ids, input_mask, token_labels
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
        '''simple docstring'''
        # Materialize a GPTNeoXJapaneseConfig from the stored hyper-parameters.
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
        '''simple docstring'''
        # Same as prepare_config_and_inputs but flips the config to decoder mode.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase = True
        return config, input_ids, input_mask, token_labels
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
        '''simple docstring'''
        # Base model forward pass: check last_hidden_state shape.
        UpperCAmelCase = GPTNeoXJapaneseModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        UpperCAmelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        UpperCAmelCase = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> List[str]:
        '''simple docstring'''
        # Decoder-mode forward pass: same shape expectation.
        UpperCAmelCase = True
        UpperCAmelCase = GPTNeoXJapaneseModel(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        UpperCAmelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE_ ( self : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> Tuple:
        '''simple docstring'''
        # Causal-LM head: logits must cover the full vocabulary per position.
        UpperCAmelCase = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        UpperCAmelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> str:
        '''simple docstring'''
        # KV-cache consistency: generation with past_key_values must match the
        # no-cache forward pass on a random hidden-state slice.
        UpperCAmelCase = True
        UpperCAmelCase = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # first forward pass
        UpperCAmelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
        UpperCAmelCase = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
        UpperCAmelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
        UpperCAmelCase = output_from_no_past["hidden_states"][0]
        UpperCAmelCase = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0]
        # select random slice
        UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
        '''simple docstring'''
        # Repackage prepared inputs into the common (config, inputs_dict) form.
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( A__, A__, unittest.TestCase ):
    """Common + pipeline test suite for GPTNeoXJapanese, plus a slow
    end-to-end generation check against the 2.7B checkpoint.

    NOTE(review): `GPTNeoXJapaneseModelTester` (referenced in setUp) was
    renamed by the obfuscation to `__magic_name__` above, and several locals
    below (`config_and_inputs`, `prompts`, `predicted_outputs`, ...) read
    names whose assignments were all rewritten to `UpperCAmelCase`.
    """
    lowercase : Optional[int] =(GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    lowercase : str =(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    lowercase : Union[str, Any] =(
        {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    lowercase : List[str] =False
    lowercase : str =False
    lowercase : Optional[Any] =False
    lowercase : List[str] =False
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
        '''simple docstring'''
        # Wire up the model tester and the generic config tester.
        UpperCAmelCase = GPTNeoXJapaneseModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
        '''simple docstring'''
        # Base-model shape check.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        # Decoder-mode shape check.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
        '''simple docstring'''
        # Decoder mode with no attention mask supplied.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
        UpperCAmelCase = None
        self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
        '''simple docstring'''
        # KV-cache consistency on large inputs.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
        '''simple docstring'''
        # Causal-LM logits shape check.
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase__ )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
        '''simple docstring'''
        # End-to-end greedy generation against pinned Japanese completions.
        UpperCAmelCase = "abeja/gpt-neox-japanese-2.7b"
        UpperCAmelCase = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        UpperCAmelCase = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        UpperCAmelCase = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCamelCase__ )
        UpperCAmelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCamelCase__ )
        UpperCAmelCase = []
        for prompt in prompts:
            UpperCAmelCase = tokenizer(UpperCamelCase__ , return_tensors="pt" ).input_ids
            UpperCAmelCase = model.generate(UpperCamelCase__ , max_length=50 )
            UpperCAmelCase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
            predicted_outputs += generated_string
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
| 323 |
import os
from datetime import datetime as dt
from github import Github
# Issue labels that exempt an issue from stale-bot processing.
# (The function below reads `LABELS_TO_EXEMPT`; the obfuscated original bound
# this list to `__lowerCamelCase`, leaving the name undefined.)
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def lowerCamelCase_() -> None:
    """Stale-bot pass over huggingface/diffusers open issues.

    Closes issues 7+ days after the bot's stale notice, re-opens issues a
    human replied to, and posts a stale notice after 23 days of inactivity.
    Requires a ``GITHUB_TOKEN`` environment variable.

    Fixes vs. the obfuscated original: the sort key lambda bound its
    parameter as ``lowerCamelCase_`` while reading ``i`` (NameError),
    ``reverse=`` pointed at an undefined name instead of ``True``, and the
    ``-> List[str]`` annotation referenced an un-imported name.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored." )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    # The obfuscated original called the undefined name `main()` here.
    lowerCamelCase_()
| 323 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __lowercase(values):
    """Element-wise ReLU: clamp each entry of *values* at zero from below."""
    return np.maximum(0, values)


# Public alias: the demo below called `relu`, which the obfuscated module
# never defined (NameError).
relu = __lowercase

if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 716 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : List[str] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
lowercase__ : Dict = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def __lowercase(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF parameter addressed by dot-separated `key`.

    Walks `key` attribute-by-attribute from `hf_pointer`, verifies the shape
    against `value`, then assigns to the tensor selected by `weight_type`
    ("weight", "weight_g", "weight_v", "bias" or None for a raw parameter).
    `full_name` is the original fairseq name, used only for messages.

    Fixes vs. the obfuscated original: the signature declared five parameters
    all named `_a` (SyntaxError), and every `.data = value` assignment target
    had been rewritten into a throwaway local.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


# Conventional name: the weight loader below calls `set_recursively`, which
# the obfuscated module never bound.
set_recursively = __lowercase
def __lowercase(fairseq_model, hf_model):
    """Transfer every fairseq state-dict tensor into the HF UniSpeechSat model.

    Conv feature-extractor tensors are routed through `load_conv_layer`; all
    other tensors are matched against `MAPPING` and written with
    `set_recursively`. Unmatched tensors are collected and logged.

    Fixes vs. the obfuscated original: duplicate `_a` parameters
    (SyntaxError) and unresolved references to `set_recursively` /
    `load_conv_layer` (both helpers had been renamed to `__lowercase`).
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # The encoder layer index sits just before the matched key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


# Conventional name used by the conversion entry point below.
recursively_load_weights = __lowercase
def __lowercase(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF extractor.

    fairseq names look like ``...conv_layers.<layer_id>.<type_id>...`` where
    type_id 0 addresses the conv itself and type_id 2 its (group/layer) norm.
    Anything else is recorded in `unused_weights`.

    Fixes vs. the obfuscated original: duplicate `_a` parameters
    (SyntaxError), lost ``.data = value`` assignment targets, and error
    messages that indexed ``feature_extractor[layer_id]`` (extractors are not
    subscriptable).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


# Conventional name used by the weight loader above.
load_conv_layer = __lowercase
@torch.no_grad()
def __lowercase(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq UniSpeechSat checkpoint into HF format.

    Loads (or default-constructs) a `UniSpeechSatConfig`, instantiates the
    CTC or pre-training model depending on `is_finetuned`, copies the fairseq
    weights over and writes the result to `pytorch_dump_folder_path`.

    Fixes vs. the obfuscated original: all five parameters were declared as
    `_a` (SyntaxError) and every local binding had been discarded, so the
    loaded model/config were never used.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): the original deliberately blanks dict_path before use, so
    # the arg_overrides "data" entry is always derived from "".
    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)


# Conventional name used by the CLI block at the bottom of the file.
convert_unispeech_sat_checkpoint = __lowercase
hf_wavavec.save_pretrained(_a )
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion above.
    # NOTE(review): obfuscation bound the parser and parsed args to
    # `lowercase__`, yet the code reads `parser` / `args` and calls
    # `convert_unispeech_sat_checkpoint` — all unresolved names here.
    lowercase__ : List[Any] = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    lowercase__ : Optional[int] = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 485 | 0 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase_(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of `function` by (modified) Newton-Raphson iteration.

    Args:
        function: expression in `variable`, parsable by sympy (e.g. "x**2-5").
        starting_point: initial guess; may be complex.
        variable: name of the free symbol in `function`.
        precision: stop once consecutive guesses differ by less than this.
        multiplicity: root multiplicity; scales the Newton step.

    Raises:
        ZeroDivisionError: if the derivative vanishes at the current guess.

    Fixes vs. the obfuscated original: all five parameters were declared as
    `__a` (SyntaxError) and the locals read below were never bound.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError('Could not find root') from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Public alias: the demo below (and external callers) use the conventional
# name, which the obfuscated module never bound.
newton_raphson = UpperCAmelCase_


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
    # Find value of e
    print(
        """The root of log(y) - 1 = 0 is """,
        F"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        """The root of exp(x) - 1 = 0 is""",
        F"{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}",
    )
    # Find root of cos(x)
    print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 437 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
a_ = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
a_ = re.compile(r"""([a-z\d])([A-Z])""")
a_ = re.compile(r"""(?<!_)_(?!_)""")
a_ = re.compile(r"""(_{2,})""")
a_ = r"""^\w+(\.\w+)*$"""
a_ = r"""<>:/\|?*"""
def UpperCAmelCase_ ( __a : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = _uppercase_uppercase_re.sub(r'\1_\2' , __a )
_lowerCamelCase : Tuple = _lowercase_uppercase_re.sub(r'\1_\2' , __a )
return name.lower()
def UpperCAmelCase_ ( __a : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Dict = _single_underscore_re.split(__a )
_lowerCamelCase : Tuple = [_multiple_underscores_re.split(__a ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__a ) if n != '' )
def UpperCAmelCase_ ( __a : List[Any] ):
'''simple docstring'''
if os.path.basename(__a ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(__a )
def UpperCAmelCase_ ( __a : Union[str, Any] , __a : Optional[int] ):
'''simple docstring'''
if os.path.basename(__a ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , __a ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(__a )}-{split}"
def UpperCAmelCase_ ( __a : Any , __a : Union[str, Any] , __a : List[Any] , __a : List[str]=None ):
'''simple docstring'''
_lowerCamelCase : List[Any] = filename_prefix_for_split(__a , __a )
if filetype_suffix:
prefix += f".{filetype_suffix}"
_lowerCamelCase : List[str] = os.path.join(__a , __a )
return f"{filepath}*"
def UpperCAmelCase_ ( __a : str , __a : List[Any] , __a : List[str] , __a : Tuple=None , __a : Tuple=None ):
'''simple docstring'''
_lowerCamelCase : Tuple = filename_prefix_for_split(__a , __a )
_lowerCamelCase : List[str] = os.path.join(__a , __a )
if shard_lengths:
_lowerCamelCase : Union[str, Any] = len(__a )
_lowerCamelCase : str = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__a )]
if filetype_suffix:
_lowerCamelCase : int = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
_lowerCamelCase : int = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
| 437 | 1 |
from math import isclose, sqrt
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: float , SCREAMING_SNAKE_CASE_: float , SCREAMING_SNAKE_CASE_: float ) -> tuple[float, float, float]:
'''simple docstring'''
A__ = point_y / 4 / point_x
A__ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
A__ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
A__ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
A__ = outgoing_gradient**2 + 4
A__ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
A__ = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
A__ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
A__ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
A__ = x_minus if isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else x_plus
A__ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowerCAmelCase__ ( first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ) -> int:
    """Project Euler 144: count laser reflections inside the white cell
    4x^2 + y^2 = 100 before the beam exits through the top gap
    (-0.01 <= x <= 0.01 with y > 0).

    NOTE(review): the obfuscated signature repeated one parameter name (a
    SyntaxError) and called an undefined `next_point`; the reflection step
    is embedded here as a private helper so the function is self-contained.
    """

    def _next_point(point_x: float, point_y: float, incoming_gradient: float):
        # Reflect the beam at (point_x, point_y) and intersect the outgoing
        # ray with the ellipse again (same math as the helper above).
        normal_gradient = point_y / 4 / point_x
        sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
        ca = (1 - normal_gradient * normal_gradient) / (
            1 + normal_gradient * normal_gradient
        )
        outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
        quadratic_term = outgoing_gradient**2 + 4
        linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
        constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
        x_minus = (
            -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
        ) / (2 * quadratic_term)
        x_plus = (
            -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
        ) / (2 * quadratic_term)
        next_x = x_minus if isclose(x_minus, point_x) else x_plus
        next_y = point_y + outgoing_gradient * (next_x - point_x)
        return next_x, next_y, outgoing_gradient

    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    # the beam enters aiming at (0.0, 10.1)
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = _next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module — the function
    # above was renamed by obfuscation — so this entry point raises NameError.
    print(f"""{solution() = }""")
| 700 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    # NOTE(review): two pairs of byte-identical duplicate imports removed
    # (`unet_ad` and `unet_ad_condition` were each imported twice; upstream
    # these were distinct unet_1d/unet_2d modules collapsed by obfuscation).
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 626 | 0 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def UpperCamelCase__( state_dict ):
    """Sum all parameter values in `state_dict` as a float32 checksum.

    Keys containing `encoder.embeddings` are skipped.

    NOTE(review): the obfuscated body referenced the undefined name
    `state_dict` while the parameter had a different name — the parameter is
    renamed to match; the stale `-> str` annotation (the result is a tensor
    scalar) is dropped.
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def UpperCamelCase__( state_dict , codebook_state_dict ):
    """Rename original-FLAVA checkpoint keys to the HF `transformers` layout.

    Embedding tables are skipped (they are copied twice in the original
    checkpoint) and all remaining values are cast to float32. Codebook
    entries are added under an `image_codebook.` prefix.

    NOTE(review): the obfuscated signature repeated one parameter name (a
    SyntaxError), every renamed key was discarded, and the function returned
    the undefined name `upgrade`; the dict accumulation is restored. The
    `image_codebook.` prefix is assumed from the HF FLAVA conversion script —
    TODO confirm against FlavaForPreTraining's expected key names.
    """
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
        key = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
        key = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
        key = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
        key = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
        key = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
        key = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
        key = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
        key = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
        key = key.replace('''image_encoder.module''' , '''flava.image_model''' )
        key = key.replace('''text_encoder.module''' , '''flava.text_model''' )
        key = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
        key = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
        key = key.replace('''text_projection''' , '''flava.text_projection''' )
        key = key.replace('''image_projection''' , '''flava.image_projection''' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def UpperCamelCase__( checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    """Convert an original FLAVA checkpoint (+ DALL-E codebook) to a
    `transformers` FlavaForPreTraining checkpoint saved at
    `pytorch_dump_folder_path`.

    NOTE(review): the obfuscated signature repeated one parameter name (a
    SyntaxError) and collapsed all locals into one temp; names are restored
    from the argparse call at the bottom of the file. `upgrade_state_dict`
    and `count_parameters` refer to the two helpers above (also obfuscated
    to `UpperCamelCase__`) — re-point these once real names are restored.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location='''cpu''' )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    # sanity check: parameter checksum of the converted model matches the source
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the FLAVA checkpoint conversion.
    # NOTE(review): the parser result is bound to the obfuscated name `a__`,
    # so the references to `parser`, `args` and `convert_flava_checkpoint`
    # below are undefined in this file — this block raises NameError as-is.
    a__: str = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    a__: Dict = parser.parse_args()
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 190 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Pipeline tests for video classification.

    NOTE(review): method names and parameter names are restored to the
    standard pipeline-test layout; the obfuscated originals repeated one
    parameter identifier (a SyntaxError) and gave all methods the same name,
    so only the last survived. The class attribute keeps its obfuscated name
    (the test harness expects `model_mapping` — confirm before relying on it).
    """

    __SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Build a pipeline around the provided model/processor and return it
        # together with example inputs (a local file and a remote URL).
        example_video_filepath = hf_hub_download(
            repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''' )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2 )
        examples = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        # Each example must yield two (score, label) dicts (top_k=2).
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs, [
                    {'''score''': ANY(float ), '''label''': ANY(str )},
                    {'''score''': ANY(float ), '''label''': ANY(str )},
                ], )

    @require_torch
    def test_small_model_pt(self):
        # Tiny random model: checks exact scores for single and batched input.
        small_model = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10}, crop_size={'''height''': 10, '''width''': 10} )
        video_classifier = pipeline(
            '''video-classification''', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''' )
        outputs = video_classifier(video_file_path, top_k=2 )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ], )

    @require_tf
    def test_small_model_tf(self):
        # TF equivalent not implemented.
        pass
| 190 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( AbstractDatasetReader ):
    """Reader that loads JSON files into a `Dataset` (or streaming dataset).

    NOTE(review): the base class is restored to `AbstractDatasetReader`
    (imported above) — the obfuscated base `UpperCamelCase` was undefined.
    The obfuscated __init__ also repeated the parameter name `_A` (a
    SyntaxError); parameter names are restored from the `super().__init__`
    keyword arguments and the attribute reads in the read method below.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        # JSON field to read records from (passed through to the Json builder)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )

    def _lowercase(self):
        """Build and return the dataset (streaming or fully materialized)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
class lowerCAmelCase__ :
    """Writer that serializes a `Dataset` to JSON (file path or binary buffer).

    NOTE(review): the obfuscated __init__ and `_write` repeated parameter
    names (SyntaxErrors), `_batch_json`'s 5-way unpack collapsed into one
    temp, and the final `return written` had row-separator junk fused onto it
    (another SyntaxError). Names are restored from the references in the
    bodies (`offset`, `orient`, `lines`, `index`, `num_rows`, ...); the
    public entry point is assumed to be `write` — confirm against callers.
    """

    def __init__(self , dataset , path_or_buf , batch_size=None , num_proc=None , **to_json_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Serialize the dataset and return the number of bytes written."""
        # `path_or_buf` in kwargs would conflict with our own target; drop it
        self.to_json_kwargs.pop("path_or_buf" , None)
        orient = self.to_json_kwargs.pop("orient" , "records")
        lines = self.to_json_kwargs.pop("lines" , True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression" , None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression')
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf , "wb" , compression=compression) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        return written

    def _batch_json(self , args):
        """Serialize one batch (used directly and via multiprocessing imap)."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self , file_obj , orient , lines , index , **to_json_kwargs , ):
        """Write all batches to `file_obj`, optionally with a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                    written += file_obj.write(json_str)
        return written
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
    """Integration test for google/mt5-small: checks the LM loss on a tiny
    input against a reference score from the original T5 (MTF) codebase.

    NOTE(review): the obfuscated body passed the undefined name `_A` where
    `torch_device` (imported at the top of this chunk) and `True` belong,
    and the final assertion had row-separator junk fused onto it (a
    SyntaxError); both are restored here.
    """

    @slow
    def _lowercase(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there" , return_tensors="pt").input_ids
        labels = tokenizer("Hi I am" , return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device) , labels=labels.to(torch_device)).loss
        # MTF reports the summed (not mean) negative log-likelihood
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random()
if is_torch_available():
import torch
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple=1.0 , __lowerCAmelCase : int=None , __lowerCAmelCase : str=None ) -> int:
if rng is None:
__lowerCamelCase = global_rng
__lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase__ ( unittest.TestCase ):
    """Config holder that produces inputs/kwargs for ASTFeatureExtractor tests.

    NOTE(review): the obfuscated __init__ repeated one parameter name (a
    SyntaxError) and assigned attributes to a bare local; names are restored
    from the `self.*` reads below. The two methods shared the mangled name
    `__A` (the second shadowed the first) — the first is renamed to
    `prepare_feat_extract_dict`, which the test class below calls; the
    second is assumed to be `prepare_inputs_for_common` (mixin convention —
    confirm).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=4_00,
        max_seq_length=20_00,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=1_60_00,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between consecutive input lengths so the batch spans min..max
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        # kwargs forwarded to the feature-extractor constructor
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self , equal_length=False , numpify=False):
        """Return a batch of float lists (or np arrays) with equal or increasing lengths."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists ) )

        # NOTE(review): `__magic_name__` is the obfuscated name of the
        # module-level `floats_list` helper defined above this class.
        if equal_length:
            speech_inputs = __magic_name__((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(__magic_name__((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
    """Tests for ASTFeatureExtractor: batching equivalence, dtype handling,
    and an integration check against reference log-mel values.

    NOTE(review): this class is broken by obfuscation and cannot run as-is —
    the base `__lowercase` is undefined (presumably the imported
    SequenceFeatureExtractionTestMixin), `ASTFeatureExtractionTester` is
    undefined (the tester class above was renamed), every method shares the
    mangled name `__A` so only the last survives, and locals are collapsed
    into `__lowerCamelCase` leaving names like `feat_extract`/`speech_inputs`
    unbound. Left byte-identical pending de-obfuscation.
    """
    a__ : Optional[Any] = ASTFeatureExtractor
    # setUp: should bind the tester to self.feat_extract_tester (read below).
    def __A ( self : Optional[int] ) -> Union[str, Any]:
        __lowerCamelCase = ASTFeatureExtractionTester(self )
    # Checks __call__ batching: single vs batched vs 2-D numpy inputs.
    def __A ( self : Optional[Any] ) -> Dict:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        __lowerCamelCase = [np.asarray(SCREAMING_SNAKE_CASE__ ) for speech_input in speech_inputs]
        # Test not batched input
        __lowerCamelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        __lowerCamelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
        # Test batched
        __lowerCamelCase = feat_extract(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ).input_values
        __lowerCamelCase = feat_extract(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        __lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        __lowerCamelCase = np.asarray(SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase = feat_extract(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ).input_values
        __lowerCamelCase = feat_extract(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
    # Checks float64 inputs are cast to float32 by pad() for np and pt.
    @require_torch
    def __A ( self : int ) -> List[Any]:
        import torch
        __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCamelCase = np.random.rand(1_00 ).astype(np.floataa )
        __lowerCamelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            __lowerCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __lowerCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    # Helper: load `num_samples` speech arrays from the dummy LibriSpeech set.
    def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]:
        from datasets import load_dataset
        __lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        __lowerCamelCase = ds.sort('''id''' ).select(range(SCREAMING_SNAKE_CASE__ ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    # Integration test against reference log-mel filter-bank values.
    @require_torch
    def __A ( self : Tuple ) -> List[Any]:
        # fmt: off
        __lowerCamelCase = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
            -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
            -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
            -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        __lowerCamelCase = self._load_datasamples(1 )
        __lowerCamelCase = ASTFeatureExtractor()
        __lowerCamelCase = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 10_24, 1_28) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 298 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Dict = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Config holder producing inputs/kwargs for Pix2StructImageProcessor tests.

    NOTE(review): the obfuscated __init__ repeated one parameter name (a
    SyntaxError) and assigned attributes to a bare local; names are restored
    from the `self.*` reads in the test classes below. The two methods
    shared the mangled name `__A` — they are renamed to match the calls
    `prepare_image_processor_dict()` / `prepare_dummy_image()` below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        self.size = size if size is not None else {'''height''': 20, '''width''': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # patch budgets exercised by the tests below
        self.max_patches = [5_12, 10_24, 20_48, 40_96]
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}

    def prepare_image_processor_dict(self):
        # kwargs forwarded to the image-processor constructor
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        # Downloads a fixed sample image (network access required).
        img_url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
    """Tests for Pix2StructImageProcessor over PIL / numpy / torch inputs.

    NOTE(review): broken by obfuscation and cannot run as-is — the base
    `__lowercase` is undefined (presumably ImageProcessingSavingTestMixin,
    imported at the top of this chunk), `PixaStructImageProcessingTester`
    refers to the renamed tester class above, all methods share the mangled
    name `__A` so only the last survives, and expected-value temporaries are
    collapsed into `__lowerCamelCase`. Left byte-identical pending
    de-obfuscation.
    """
    a__ : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
    # setUp: should bind the tester to self.image_processor_tester.
    def __A ( self : Any ) -> Tuple:
        __lowerCamelCase = PixaStructImageProcessingTester(self )
    @property
    def __A ( self : Any ) -> Dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    # Processor exposes the expected config attributes.
    def __A ( self : List[str] ) -> Tuple:
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
    # Integration: mean of flattened patches for a fixed downloaded image.
    def __A ( self : Optional[Any] ) -> List[str]:
        __lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        __lowerCamelCase = 20_48
        __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
    # PIL inputs: single and batched shapes for every max_patches budget.
    def __A ( self : Optional[int] ) -> Union[str, Any]:
        # Initialize image_processor
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test not batched input
        __lowerCamelCase = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __lowerCamelCase = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            __lowerCamelCase = image_processor(
                SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    # With VQA flag: calling without header_text must raise; with it, shapes match.
    def __A ( self : Any ) -> Dict:
        # Initialize image_processor
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test not batched input
        __lowerCamelCase = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        __lowerCamelCase = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
                __lowerCamelCase = image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            __lowerCamelCase = '''Hello'''
            __lowerCamelCase = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            __lowerCamelCase = image_processor(
                SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    # numpy inputs: single and batched shapes.
    def __A ( self : int ) -> Union[str, Any]:
        # Initialize image_processor
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
        __lowerCamelCase = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __lowerCamelCase = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            __lowerCamelCase = image_processor(
                SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    # torch inputs: single and batched shapes.
    def __A ( self : Any ) -> int:
        # Initialize image_processor
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
        # Test not batched input
        __lowerCamelCase = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __lowerCamelCase = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            __lowerCamelCase = image_processor(
                SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
    """4-channel (RGBA) variant of the Pix2StructImageProcessor tests; the
    processor converts to RGB, so hidden dims use num_channels - 1.

    NOTE(review): same obfuscation breakage as the class above (undefined
    base `__lowercase`, renamed tester, duplicate `__A` method names,
    collapsed locals). Left byte-identical pending de-obfuscation.
    """
    a__ : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
    # setUp: 4-channel tester; expected_encoded_image_num_channels = 3.
    def __A ( self : List[str] ) -> Optional[Any]:
        __lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
        __lowerCamelCase = 3
    @property
    def __A ( self : List[Any] ) -> Union[str, Any]:
        return self.image_processor_tester.prepare_image_processor_dict()
    # Processor exposes the expected config attributes.
    def __A ( self : Optional[int] ) -> Any:
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
    # PIL RGBA inputs: shapes use (num_channels - 1) after RGB conversion.
    def __A ( self : Optional[int] ) -> Any:
        # Initialize image_processor
        __lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test not batched input
        __lowerCamelCase = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __lowerCamelCase = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            __lowerCamelCase = image_processor(
                SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 298 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : int ) ->float:
'''simple docstring'''
a : Tuple = x
a : Union[str, Any] = y
for step in range(_lowercase ): # noqa: B007
a : Any = a * a - b * b + x
a : str = 2 * a * b + y
a : Dict = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _SCREAMING_SNAKE_CASE ( _lowercase : float ) ->tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _SCREAMING_SNAKE_CASE ( _lowercase : float ) ->tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowercase , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE ( image_width : int = 800 , image_height : int = 600 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 50 , use_distance_color_coding : bool = True , ) ->Image.Image:
    """Render the Mandelbrot set into a PIL image.

    NOTE(review): the obfuscated signature repeated one parameter name (a
    SyntaxError) and pixel writes were collapsed into a local; parameter and
    local names are restored from the body references. The helpers
    `get_distance`, `get_color_coded_rgb` and `get_black_and_white_rgb`
    below are the three functions above, which were all obfuscated to the
    same module name — these references must be re-pointed once the real
    names are restored.
    """
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): the render is bound to the obfuscated name `a`, while
    # `get_image` and `img` below are undefined in this module — this entry
    # point raises NameError as-is.
    # colored version, full figure
    a : Union[str, Any] = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 31 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output class for the KarrasVe scheduler's step functions.

    Attributes:
        prev_sample: denoised sample for the next diffusion step, x_{t-1}.
        derivative: derivative of the predicted original sample (x_0), used
            by the second-order correction step.
        pred_original_sample: the predicted fully-denoised sample (x_0),
            usable for progress preview. May be None when not computed.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampling scheduler in the style of Karras et al., for
    variance-expanding diffusion models: a geometric sigma schedule, a
    "churn" step that temporarily raises the noise level, a first-order
    Euler step, and an optional second-order (Heun) correction.
    """

    # this scheduler's update is second-order (Heun correction)
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        """
        Args:
            sigma_min / sigma_max: endpoints of the geometric noise schedule.
            s_noise: extra noise scale for the churn step.
            s_churn: controls gamma, the fraction by which sigma is raised.
            s_min / s_max: sigma range in which churn is applied.
        """
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (filled in by set_timesteps)
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No input scaling is needed for this scheduler; returns *sample* unchanged (API-compat hook)."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Build the reversed timestep tensor and the geometric sigma schedule for sampling."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # geometric interpolation from sigma_max down to sigma_min
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """
        Explicit Langevin-like "churn": raise the noise level sigma to
        sigma_hat = sigma + gamma * sigma and add matching gaussian noise,
        but only when sigma lies inside [s_min, s_max].
        Returns (sample_hat, sigma_hat).
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """First-order (Euler) step propagating the sample from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun) correction: averages the Euler derivative with one evaluated at sigma_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not defined for this sampler-only scheduler.
        raise NotImplementedError()
| 31 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions: *dataset* is a 4-row, 3-column Dataset whose feature dtypes match *expected_features*."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a parquet file honors keep_in_memory (arrow memory grows only when kept in memory)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit Features mapping overrides the dtypes inferred from the parquet file."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split name is propagated to the resulting Dataset (default: "train")."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader accepts a single path as well as a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions: *dataset_dict* holds, for each split, a 4-row/3-column dataset with the expected dtypes."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a {split: path} mapping honors keep_in_memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """An explicit Features mapping overrides the inferred dtypes for DatasetDict reads too."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Each entry of the {split: path} mapping ends up under its own split name."""
    if split:
        path = {split: parquet_path}
    else:
        # no explicit split: read both and expect the default "train"/"test" names
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a Dataset to parquet round-trips the underlying arrow table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features survive a write-to-parquet / read-back round trip (eager and streaming)."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Media features (even nested ones) trigger the smaller media-specific parquet row-group size."""
    assert get_writer_batch_size(feature) == expected
| 326 |
def is_pentagonal(n: int) -> bool:
    """
    Return True if *n* is a pentagonal number, i.e. n = m(3m - 1)/2 for some
    positive integer m (inverted via the quadratic formula).
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """
    Project Euler 44: find the pair of pentagonal numbers P_j, P_k (drawn
    from the first ``limit - 1`` pentagonals) whose sum and difference are
    both pentagonal, and return the difference D = P_k - P_j.

    Returns -1 if no such pair exists within the search limit.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
| 326 | 1 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Module-level logger and default device; both are referenced below
# (logger.info in generate_summaries_or_translations, DEFAULT_DEVICE as a default).
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16: bool = False,
    task: str = "summarization",
    prefix: str = None,
    **generate_kwargs,
) -> Dict:
    """
    Run ``model.generate`` over *examples* in batches, writing one hypothesis
    per line to *out_file*, and return runtime statistics
    ({"n_obs", "runtime", "seconds_per_sample"}).
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    # NOTE(review): the import at the top of this file spells the class
    # AutoModelForSeqaSeqLM; upstream transformers calls it AutoModelForSeq2SeqLM.
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now() -> str:
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose: bool = True) -> Dict:
    """
    CLI entry point: parse args, run generation over the input file, and
    optionally score the outputs against a reference file.

    Returns the scores dict ({} when no --reference_path is given).
    Unrecognized ``--key=value`` flags are forwarded to ``model.generate``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5 models expect a leading space on each source line
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): `run_generate` is defined above under an obfuscated name
    # (`__magic_name__`); as written this call raises NameError — verify the
    # function definition is restored to its original name.
    run_generate(verbose=True)
| 474 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a__ ( __A ):
    """
    Class-conditional image generation pipeline (DiT-style): a transformer
    denoiser + VAE decoder + scheduler, with optional classifier-free guidance.

    NOTE(review): this snippet is machine-obfuscated and broken as written —
    `__A` (presumably DiffusionPipeline) is undefined, several `def`s repeat
    the parameter name `__lowercase` (a SyntaxError), and names like
    `idalabel`, `self.labels`, `label`, and `latent_model_input` are read
    before any surviving assignment. Comments below describe the evident
    intent; confirm against the upstream DiTPipeline before relying on them.
    """

    def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase = None , ):
        # Registers transformer, vae and scheduler as pipeline modules; the
        # optional last argument appears to be an id2label mapping.
        super().__init__()
        self.register_modules(transformer=__lowercase , vae=__lowercase , scheduler=__lowercase )

        # create a imagenet -> id dictionary for easier use
        __lowerCAmelCase = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(''',''' ):
                    # maps each comma-separated label string to its integer class id
                    __lowerCAmelCase = int(__lowercase )

        __lowerCAmelCase = dict(sorted(self.labels.items() ) )

    def _snake_case (self , __lowercase ):
        # Converts a label (or list of labels) to class ids, raising for
        # labels absent from self.labels.
        if not isinstance(__lowercase , __lowercase ):
            __lowerCAmelCase = list(__lowercase )

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(self , __lowercase , __lowercase = 4.0 , __lowercase = None , __lowercase = 50 , __lowercase = "pil" , __lowercase = True , ):
        # Denoising loop: start from gaussian latents, duplicate the batch
        # when guidance_scale > 1 (classifier-free guidance with a null class
        # id of 1000), run the scheduler, then decode via the VAE.
        __lowerCAmelCase = len(__lowercase )
        __lowerCAmelCase = self.transformer.config.sample_size
        __lowerCAmelCase = self.transformer.config.in_channels
        __lowerCAmelCase = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__lowercase , device=self.device , dtype=self.transformer.dtype , )
        __lowerCAmelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        __lowerCAmelCase = torch.tensor(__lowercase , device=self.device ).reshape(-1 )
        # 1000 is the "null" class id used for the unconditional branch
        __lowerCAmelCase = torch.tensor([10_00] * batch_size , device=self.device )
        __lowerCAmelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(__lowercase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                # both halves of the batch share the same latents; re-duplicate
                __lowerCAmelCase = latent_model_input[: len(__lowercase ) // 2]
                __lowerCAmelCase = torch.cat([half, half] , dim=0 )
            __lowerCAmelCase = self.scheduler.scale_model_input(__lowercase , __lowercase )

            __lowerCAmelCase = t
            if not torch.is_tensor(__lowercase ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                __lowerCAmelCase = latent_model_input.device.type == '''mps'''
                if isinstance(__lowercase , __lowercase ):
                    __lowerCAmelCase = torch.floataa if is_mps else torch.floataa
                else:
                    __lowerCAmelCase = torch.intaa if is_mps else torch.intaa
                __lowerCAmelCase = torch.tensor([timesteps] , dtype=__lowercase , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                __lowerCAmelCase = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            __lowerCAmelCase = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            __lowerCAmelCase = self.transformer(
                __lowercase , timestep=__lowercase , class_labels=__lowercase ).sample

            # perform guidance
            if guidance_scale > 1:
                __lowerCAmelCase , __lowerCAmelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                __lowerCAmelCase , __lowerCAmelCase = torch.split(__lowercase , len(__lowercase ) // 2 , dim=0 )

                __lowerCAmelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                __lowerCAmelCase = torch.cat([half_eps, half_eps] , dim=0 )

                __lowerCAmelCase = torch.cat([eps, rest] , dim=1 )

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                __lowerCAmelCase , __lowerCAmelCase = torch.split(__lowercase , __lowercase , dim=1 )
            else:
                __lowerCAmelCase = noise_pred

            # compute previous image: x_t -> x_t-1
            __lowerCAmelCase = self.scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample

        if guidance_scale > 1:
            __lowerCAmelCase , __lowerCAmelCase = latent_model_input.chunk(2 , dim=0 )
        else:
            __lowerCAmelCase = latent_model_input

        # scale latents back to image space and decode with the VAE
        __lowerCAmelCase = 1 / self.vae.config.scaling_factor * latents
        __lowerCAmelCase = self.vae.decode(__lowercase ).sample

        __lowerCAmelCase = (samples / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        __lowerCAmelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            __lowerCAmelCase = self.numpy_to_pil(__lowercase )

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=__lowercase )
| 474 | 1 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """
    Resolve an Instagram/IGTV page *url* through the downloadgram API and
    return the raw bytes of the underlying video file.
    """
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
    # prompt for a URL, download the video, and save it with a timestamped name
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, """wb""") as fp:
        fp.write(download_video(url))
    print(f"""Done. Video saved to disk as {file_name}.""")
| 411 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
    '''
    Byte-level tokenizer (ByT5-style): each UTF-8 byte is a token, preceded
    by a small block of special tokens (pad=0, eos=1, unk=2) and followed by
    ``extra_ids`` sentinel tokens appended at the top of the vocabulary.

    NOTE(review): machine-obfuscated and broken as written — the base class
    `UpperCAmelCase_` is undefined (presumably PreTrainedTokenizer), several
    `def`s repeat a parameter name (a SyntaxError), and locals collapsed to
    `_snake_case` leave names like `extra_ids`, `token_ids`, `tokens`, and
    `index` unbound. Comments describe the evident intent only.
    '''

    a_ : Dict =["""input_ids""", """attention_mask"""]

    def __init__( self : List[str] , UpperCamelCase : Tuple="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : str="<pad>" , UpperCamelCase : Tuple=1_25 , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : Dict , ):
        '''Build the byte vocabulary plus eos/unk/pad and the <extra_id_*> sentinels.'''
        if extra_ids > 0 and additional_special_tokens is None:
            _snake_case : Union[str, Any] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _snake_case : List[Any] = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )

        # normalize pad/eos/unk into AddedToken instances
        _snake_case : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token
        _snake_case : Optional[int] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token
        _snake_case : Any = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token

        super().__init__(
            eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , )

        _snake_case : Any = extra_ids
        _snake_case : Optional[Any] = 2**8 # utf is 8 bits
        # define special tokens dict
        _snake_case : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        _snake_case : int = len(self.special_tokens_encoder )
        _snake_case : Optional[int] = len(UpperCamelCase )
        for i, token in enumerate(UpperCamelCase ):
            # extra_id tokens occupy the highest ids of the vocabulary
            _snake_case : int = self.vocab_size + i - n
        _snake_case : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def UpperCamelCase_ ( self : Tuple ):
        '''Vocabulary size: 256 byte values + special tokens + extra_ids sentinels.'''
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def UpperCamelCase_ ( self : int , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
        '''Return a 0/1 mask marking special tokens (the appended eos of each sequence).'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )

        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(UpperCamelCase )) + [1]
        return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]

    def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] ):
        '''Append eos unless the sequence already ends with it (then only warn).'''
        if len(UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
        '''Token-type ids: ByT5 does not use them, so everything is 0.'''
        _snake_case : int = [self.eos_token_id]

        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
        '''Build model input: sequence(+eos), optionally followed by a second sequence(+eos).'''
        _snake_case : List[str] = self._add_eos_if_not_present(UpperCamelCase )
        if token_ids_a is None:
            return token_ids_a
        else:
            _snake_case : Tuple = self._add_eos_if_not_present(UpperCamelCase )
            return token_ids_a + token_ids_a

    def UpperCamelCase_ ( self : int , UpperCamelCase : str ):
        '''Tokenize text into one string token per UTF-8 byte.'''
        _snake_case : Union[str, Any] = [chr(UpperCamelCase ) for i in text.encode('utf-8' )]
        return tokens

    def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Union[str, Any] ):
        '''Map a token string to its id (special tokens first, then byte value + offset).'''
        if token in self.special_tokens_encoder:
            _snake_case : Optional[Any] = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            _snake_case : str = self.added_tokens_encoder[token]
        elif len(UpperCamelCase ) != 1:
            # multi-character strings cannot be single bytes -> unk
            _snake_case : Optional[Any] = self.unk_token_id
        else:
            _snake_case : Tuple = ord(UpperCamelCase ) + self._num_special_tokens
        return token_id

    def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[Any] ):
        '''Map an id back to its token string (inverse of the previous method).'''
        if index in self.special_tokens_decoder:
            _snake_case : List[str] = self.special_tokens_decoder[index]
        else:
            _snake_case : Tuple = chr(index - self._num_special_tokens )
        return token

    def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : int ):
        '''Join byte tokens back into a string, decoding as UTF-8 and ignoring errors.'''
        _snake_case : Tuple = B''
        for token in tokens:
            if token in self.special_tokens_decoder:
                _snake_case : int = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                _snake_case : Optional[int] = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                _snake_case : int = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                _snake_case : int = token.encode('utf-8' )
            else:
                # ordinary byte token: its codepoint is the byte value
                _snake_case : Optional[Any] = bytes([ord(UpperCamelCase )] )
            bstring += tok_string
        _snake_case : Tuple = bstring.decode('utf-8' , errors='ignore' )
        return string

    def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
        '''Byte-level tokenizer has no vocabulary file to save.'''
        return ()
| 411 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: module names -> public symbols, extended below with
# the optional vision/torch objects when those backends are available.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    # ...at runtime the module is replaced by a lazy proxy that imports on demand.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 501 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: module names -> public symbols, extended below with
# the optional torch/tensorflow model classes when those backends are available.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    # ...at runtime the module is replaced by a lazy proxy that imports on demand.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 501 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in so `Image.open` references resolve when PIL is absent."""

        @staticmethod
        def open(*args, **kwargs):
            # No-op: tests requiring real images are gated on @require_vision.
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    """Tests for ObjectDetectionPipeline: generic harness hooks plus tiny/real-model checks."""

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Hook for the common pipeline-test harness: build the pipeline and
        # return sample inputs it can be run on.
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        # threshold=0.0 keeps every candidate box so the output is never empty.
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        # Batch mixing a PIL image, a URL, and on-disk files of various modes.
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        # Same model as above, but constructed through the high-level `pipeline` factory.
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        # A high threshold must filter out everything below it (only the two cats remain).
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        # Document-understanding model: OCR (pytesseract) provides the word boxes.
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 169 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny test checkpoints."""

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        # Tiny unconditional UNet checkpoint used to keep tests fast.
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        # Tiny class-conditional UNet checkpoint.
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators, fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        # Single-step distillation mode: one inference step, default timesteps.
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests for ConsistencyModelPipeline against the real CD-ImageNet64 checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            # Pin latents so fp16/flash-attention runs are bit-comparable.
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
while second != 0:
__SCREAMING_SNAKE_CASE = first & second
first ^= second
__SCREAMING_SNAKE_CASE = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Optional[int] = int(input('''Enter the first number: ''').strip())
a__ : Union[str, Any] = int(input('''Enter the second number: ''').strip())
print(F"{add(first, second) = }")
| 553 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
a__ : List[str] = 6_37_81_37.0
a__ : Tuple = 6_35_67_52.31_42_45
a__ : str = 6_3_7_8_1_3_7
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (AXIS_A - AXIS_B) / AXIS_A
__SCREAMING_SNAKE_CASE = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
__SCREAMING_SNAKE_CASE = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
__SCREAMING_SNAKE_CASE = radians(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = radians(lowerCAmelCase_ )
# Equation
__SCREAMING_SNAKE_CASE = sin((phi_a - phi_a) / 2 )
__SCREAMING_SNAKE_CASE = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__SCREAMING_SNAKE_CASE = sqrt(sin_sq_phi + (cos(lowerCAmelCase_ ) * cos(lowerCAmelCase_ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 553 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# URL template for fetching a community pipeline file from the diffusers repo.
# (Name restored from the use-site further down in this file.)
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all released diffusers versions from PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    # Sort by semantic version rather than lexicographically.
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the HF dynamic-modules cache dir (with an ``__init__.py``) and put it on ``sys.path``."""
    # This function has already been executed if HF_MODULES_CACHE is on the path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create (recursively) the dynamic module ``name`` inside the HF modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the unique relative-import module names found in ``module_file``.

    Matches both ``import .xxx`` and ``from .xxx import yyy`` forms.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Return the transitive closure of relative-import ``.py`` files reachable from ``module_file``."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        # Relative imports are resolved next to the *root* module file.
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that every top-level package imported by ``filename`` is installed.

    Raises ImportError listing the missing packages; on success returns the
    file's relative imports (see ``get_relative_imports``).
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import ``module_path`` and return the class named ``class_name``.

    When ``class_name`` is None, auto-detect the single pipeline class defined
    in the module instead.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the unique DiffusionPipeline subclass defined in ``loaded_module``.

    Classes inherited from the ``diffusers`` package itself are ignored; more
    than one candidate is an error.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def _lowercase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , ):
    """Resolve, download and cache a dynamic-module file, returning its path.

    The module file is looked up in three ways: as a local file, as a community
    pipeline on the diffusers GitHub repo (when the name has no "/"), or via
    the Hugging Face Hub. The resolved file and its relative imports are then
    copied into the dynamic-modules cache so they can be imported.

    NOTE(review): the obfuscation rebound every assignment to `__A` while the
    later reads use the original names (`module_file_or_url`, `submodule`,
    `revision`, `resolved_module_file`, `modules_needed`, `full_submodule`,
    `submodule_path`, `commit_hash`, ...), and the parameter list repeats one
    name — this function cannot run as written and needs its bindings restored.
    """
    __A : Union[str, Any] = str(SCREAMING_SNAKE_CASE )
    __A : str = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    if os.path.isfile(SCREAMING_SNAKE_CASE ):
        # Local file: use it directly and cache under the "local" submodule.
        __A : int = module_file_or_url
        __A : Optional[int] = "local"
    elif pretrained_model_name_or_path.count("/" ) == 0:
        # No "/" in the name: treat it as a community pipeline hosted on GitHub.
        __A : List[str] = get_diffusers_versions()
        # cut ".dev0"
        __A : Optional[Any] = "v" + ".".join(__version__.split("." )[:3] )
        # retrieve github version that matches
        if revision is None:
            __A : Tuple = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}." )
        elif revision in available_versions:
            __A : str = f"v{revision}"
        elif revision == "main":
            __A : Optional[int] = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'] )}." )
        # community pipeline on GitHub
        __A : Tuple = COMMUNITY_PIPELINES_URL.format(revision=SCREAMING_SNAKE_CASE , pipeline=SCREAMING_SNAKE_CASE )
        try:
            __A : Union[str, Any] = cached_download(
                SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , )
            __A : Dict = "git"
            # The cached module file is named after the pipeline itself.
            __A : Optional[int] = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            __A : str = hf_hub_download(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , )
            # Hub repos get a per-repo subfolder: "local/<org>--<repo>".
            __A : Any = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
            raise
    # Check we have all the requirements in our environment
    __A : int = check_imports(SCREAMING_SNAKE_CASE )
    # Now we move the module inside our cached dynamic modules.
    __A : List[Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(SCREAMING_SNAKE_CASE )
    __A : Dict = Path(SCREAMING_SNAKE_CASE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(SCREAMING_SNAKE_CASE , submodule_path / module_file )
        for module_needed in modules_needed:
            __A : List[Any] = f"{module_needed}.py"
            shutil.copy(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            __A : Any = use_auth_token
        elif use_auth_token is True:
            # presumably falls back to the locally stored Hub token — verify against HfFolder docs
            __A : List[str] = HfFolder.get_token()
        else:
            __A : str = None
        __A : Union[str, Any] = model_info(SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        __A : Dict = submodule_path / commit_hash
        __A : Dict = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(SCREAMING_SNAKE_CASE )
        if not (submodule_path / module_file).exists():
            shutil.copy(SCREAMING_SNAKE_CASE , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                # Recurse to fetch and cache each relative import of the module.
                get_cached_module_file(
                    SCREAMING_SNAKE_CASE , f"{module_needed}.py" , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , )
    return os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _lowercase (pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """Download a module file (locally, from GitHub or the Hub) and return a class from it.

    Args:
        pretrained_model_name_or_path: Repo id, community pipeline name, or local dir.
        module_file: Name of the ``.py`` file to load.
        class_name: Class to extract; ``None`` auto-discovers the pipeline class.
        Remaining keyword arguments are forwarded to the download/cache machinery.

    Returns:
        The requested class object from the cached dynamic module.
    """
    # Resolve and cache the module file (plus its relative imports).
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    # Strip the ".py" suffix so the path can be imported as a module.
    return get_class_in_module(class_name, final_module.replace(".py", ""))
# NOTE(review): stray dataset-join artifact ("| 111 |") — not Python; kept as a comment.
"""Extract a subset of layers from a full RobertaForMaskedLM or GPT2LMHeadModel
checkpoint to warm-start a smaller student model for distillation.
"""
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Copy every other teacher layer into consecutive student slots.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
# NOTE(review): stray dataset-join artifact ("| 111 | 1 |") — not Python; kept as a comment.
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class A_ ( SCREAMING_SNAKE_CASE ):
    """Read-only fsspec filesystem over a single compressed file.

    The "archive" contains exactly one member: the decompressed file, whose
    name is the compressed file's basename with the compression extension
    stripped. Attribute/method names follow the hooks expected by fsspec's
    ``AbstractArchiveFileSystem`` (``_strip_protocol``, ``_get_dirs``, ``_open``).
    """

    root_marker = ""
    # Protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol: str = None
    # compression type in fsspec. ex: "gzip"
    compression: str = None
    # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    extension: str = None

    def __init__(self, fo="", target_protocol=None, target_options=None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily build the single-entry directory listing for the one member.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path):
        # Return the whole decompressed content as bytes.
        return self.file.open().read()

    def _open(self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class A_ ( SCREAMING_SNAKE_CASE ):
    """Filesystem over a single bzip2-compressed file."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class A_ ( SCREAMING_SNAKE_CASE ):
    """Filesystem over a single gzip-compressed file."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class A_ ( SCREAMING_SNAKE_CASE ):
    """Filesystem over a single lz4-compressed file."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class A_ ( SCREAMING_SNAKE_CASE ):
    """Filesystem over a single xz-compressed file."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class A_ ( SCREAMING_SNAKE_CASE ):
    """Filesystem over a single zstandard-compressed file."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy exposing a writable wrapper around the decompression reader."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            # Wrap the object returned by the original __enter__.
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
# NOTE(review): stray dataset-join artifact ("| 337 |") — not Python; kept as a comment.
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Shared checks for DeepFloyd-IF pipeline tests (components, save/load).

    NOTE(review): this mixin relies on `self.pipeline_class` and
    `self.get_dummy_inputs` being supplied by the concrete test class — TODO
    confirm. The obfuscation also rebound assignments to `__lowerCamelCase`
    while later reads use the intended names (`text_encoder`, `tokenizer`,
    `unet`, `scheduler`, `pipe`, `inputs`, ...); the bindings need restoring
    before this can run.
    """

    def lowerCAmelCase ( self : Tuple):
        """Build dummy components for a first-stage IF pipeline (text -> image)."""
        torch.manual_seed(0)
        __lowerCamelCase : Optional[int] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        __lowerCamelCase : List[str] = UNetaDConditionModel(
            sample_size=3_2 ,layers_per_block=1 ,block_out_channels=[3_2, 6_4] ,down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] ,mid_block_type='UNetMidBlock2DSimpleCrossAttn' ,up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] ,in_channels=3 ,out_channels=6 ,cross_attention_dim=3_2 ,encoder_hid_dim=3_2 ,attention_head_dim=8 ,addition_embed_type='text' ,addition_embed_type_num_heads=2 ,cross_attention_norm='group_norm' ,resnet_time_scale_shift='scale_shift' ,act_fn='gelu' ,)
        unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
        torch.manual_seed(0)
        __lowerCamelCase : Dict = DDPMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='squaredcos_cap_v2' ,beta_start=0.0001 ,beta_end=0.02 ,thresholding=SCREAMING_SNAKE_CASE__ ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type='epsilon' ,variance_type='learned_range' ,)
        torch.manual_seed(0)
        __lowerCamelCase : List[Any] = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def lowerCAmelCase ( self : Any):
        """Build dummy components for a super-resolution IF pipeline (image + noise schedule)."""
        torch.manual_seed(0)
        __lowerCamelCase : int = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        __lowerCamelCase : Any = UNetaDConditionModel(
            sample_size=3_2 ,layers_per_block=[1, 2] ,block_out_channels=[3_2, 6_4] ,down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] ,mid_block_type='UNetMidBlock2DSimpleCrossAttn' ,up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] ,in_channels=6 ,out_channels=6 ,cross_attention_dim=3_2 ,encoder_hid_dim=3_2 ,attention_head_dim=8 ,addition_embed_type='text' ,addition_embed_type_num_heads=2 ,cross_attention_norm='group_norm' ,resnet_time_scale_shift='scale_shift' ,act_fn='gelu' ,class_embed_type='timestep' ,mid_block_scale_factor=1.414 ,time_embedding_act_fn='gelu' ,time_embedding_dim=3_2 ,)
        unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
        torch.manual_seed(0)
        __lowerCamelCase : str = DDPMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='squaredcos_cap_v2' ,beta_start=0.0001 ,beta_end=0.02 ,thresholding=SCREAMING_SNAKE_CASE__ ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type='epsilon' ,variance_type='learned_range' ,)
        torch.manual_seed(0)
        __lowerCamelCase : Union[str, Any] = DDPMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='squaredcos_cap_v2' ,beta_start=0.0001 ,beta_end=0.02 ,)
        torch.manual_seed(0)
        __lowerCamelCase : Any = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def lowerCAmelCase ( self : str):
        """Check that optional components survive save_pretrained/from_pretrained as None,
        and that outputs match when prompts are passed as precomputed embeddings."""
        __lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        __lowerCamelCase : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = inputs['prompt']
        __lowerCamelCase : str = inputs['generator']
        __lowerCamelCase : List[Any] = inputs['num_inference_steps']
        __lowerCamelCase : Optional[Any] = inputs['output_type']
        # Image-like inputs are optional and depend on the concrete pipeline flavor.
        if "image" in inputs:
            __lowerCamelCase : Dict = inputs['image']
        else:
            __lowerCamelCase : Optional[Any] = None
        if "mask_image" in inputs:
            __lowerCamelCase : Optional[int] = inputs['mask_image']
        else:
            __lowerCamelCase : Dict = None
        if "original_image" in inputs:
            __lowerCamelCase : Dict = inputs['original_image']
        else:
            __lowerCamelCase : Optional[Any] = None
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe.encode_prompt(SCREAMING_SNAKE_CASE__)
        # inputs with prompt converted to embeddings
        __lowerCamelCase : Union[str, Any] = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            __lowerCamelCase : List[str] = image
        if mask_image is not None:
            __lowerCamelCase : List[Any] = mask_image
        if original_image is not None:
            __lowerCamelCase : Optional[Any] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : List[Any] = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__)
        pipe_loaded.to(SCREAMING_SNAKE_CASE__)
        pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) is None ,F"`{optional_component}` did not stay set to None after loading." ,)
        __lowerCamelCase : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = inputs['generator']
        __lowerCamelCase : Any = inputs['num_inference_steps']
        __lowerCamelCase : List[str] = inputs['output_type']
        # inputs with prompt converted to embeddings
        __lowerCamelCase : Any = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            __lowerCamelCase : Optional[int] = image
        if mask_image is not None:
            __lowerCamelCase : int = mask_image
        if original_image is not None:
            __lowerCamelCase : int = original_image
        __lowerCamelCase : List[Any] = pipe_loaded(**SCREAMING_SNAKE_CASE__)[0]
        # Outputs before and after the save/load round-trip must match closely.
        __lowerCamelCase : Dict = np.abs(to_np(SCREAMING_SNAKE_CASE__) - to_np(SCREAMING_SNAKE_CASE__)).max()
        self.assertLess(SCREAMING_SNAKE_CASE__ ,1E-4)

    def lowerCAmelCase ( self : List[Any]):
        """Check that outputs are unchanged after a plain save_pretrained/from_pretrained round-trip."""
        __lowerCamelCase : str = self.get_dummy_components()
        __lowerCamelCase : Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE__)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : List[Any] = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__)
        pipe_loaded.to(SCREAMING_SNAKE_CASE__)
        pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
        __lowerCamelCase : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = pipe_loaded(**SCREAMING_SNAKE_CASE__)[0]
        __lowerCamelCase : int = np.abs(to_np(SCREAMING_SNAKE_CASE__) - to_np(SCREAMING_SNAKE_CASE__)).max()
        self.assertLess(SCREAMING_SNAKE_CASE__ ,1E-4)
# NOTE(review): stray dataset-join artifact ("| 337 | 1 |") — not Python; kept as a comment.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Map of submodule name -> public symbols, used to build the lazy module below.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; runtime uses the lazy module.
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# NOTE(review): stray dataset-join artifact ("| 297 |") — not Python; kept as a comment.
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def SCREAMING_SNAKE_CASE(hidden_states, drop_prob=0.0, training=False):
    """Drop paths (stochastic depth) per sample, for the main path of residual blocks.

    Args:
        hidden_states: Input tensor of shape ``(batch, ...)``.
        drop_prob: Probability of dropping an entire sample's path.
        training: Apply dropping only when True; otherwise return the input unchanged.

    Returns:
        Tensor of the same shape; surviving samples are rescaled by
        ``1 / keep_prob`` so the expected value is preserved.
    """
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1 - drop_prob
    # One random value per sample, broadcastable over all remaining dims
    # (works with diff dim tensors, not just 2D ConvNets).
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize
    output = hidden_states.div(keep_prob) * random_tensor
    return output
class _a ( nn.Module ):
    """Per-sample stochastic depth (drop path); a no-op at p=0 or in eval mode."""

    def __init__(self, drop_prob=None):
        super().__init__()
        # Probability of dropping a sample's residual path.
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        # Delegates to the module-level drop-path helper defined above in this file.
        return SCREAMING_SNAKE_CASE(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        # Shown in repr(module), e.g. "DropPath(p=0.1)".
        return "p={}".format(self.drop_prob)
class _a ( nn.Module ):
    """Patchify pixel values with a strided convolution and optionally normalize."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept either an int or an iterable for each spatial hyperparameter.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class _a ( nn.GroupNorm ):
    """GroupNorm with a single group, normalizing over all channels of NCHW tensors."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class _a ( nn.Module ):
    """PoolFormer token mixer: average pooling minus the identity."""

    def __init__(self, pool_size):
        super().__init__()
        # Same-size pooling thanks to stride 1 and symmetric padding.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the block only models the residual of pooling.
        return self.pool(hidden_states) - hidden_states
class _a ( nn.Module ):
    """PoolFormer MLP block: two 1x1 convolutions with activation and drop path."""

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # `hidden_act` may be an activation name (looked up in ACTaFN) or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class _a ( nn.Module ):
    """One PoolFormer block: pooling token mixer + MLP, each with a residual
    connection and optional learnable layer scale."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Per-channel learnable scales applied to each residual branch.
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class _a ( nn.Module ):
    """PoolFormer encoder: a stack of patch-embedding stages, each followed by
    a list of PoolFormer blocks."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: linearly increasing drop-path rates
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class _a ( SCREAMING_SNAKE_CASE ):
    """Base class handling weight initialization and pretrained-model plumbing
    for PoolFormer models."""

    # Attributes read by the PreTrainedModel machinery.
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize module weights: normal init for conv/linear, unit LayerNorm."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on submodules that support it
        # (upstream targets the PoolFormer encoder — TODO confirm the intended class).
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value
lowerCamelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
    """Bare PoolFormer model: encoder only, no task head.

    NOTE(review): machine-mangled copy — ``__init__`` assigns to throwaway
    ``a__`` locals instead of ``self.config`` / ``self.encoder`` that the
    forward pass reads, and the decorator arguments below reference
    ``__UpperCAmelCase`` at class scope (undefined).  Restore the original
    identifiers before use.
    """
    def __init__( self , __UpperCAmelCase ):
        """Build the encoder from the given PoolFormer config."""
        super().__init__(__UpperCAmelCase )
        a__ : Optional[Any] = config
        a__ : int = PoolFormerEncoder(__UpperCAmelCase )
        # Initialize weights and apply final processing
        self.post_init()
    def _A ( self ):
        """Return the patch-embedding layer (input-embeddings accessor)."""
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(__UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _A ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
        """Forward pass: delegate to the encoder and wrap/unwrap the output.

        Raises ValueError when pixel_values is None.
        """
        a__ : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a__ : str = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        a__ : List[Any] = self.encoder(
            __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , )
        a__ : Any = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class _a ( nn.Module ):
    """Final pooler: a single hidden_size -> hidden_size linear projection.

    NOTE(review): in the mangled original the constructor argument was never
    named ``config`` (so ``config.hidden_size`` raised NameError), the linear
    layer was bound to a throwaway local instead of ``self.dense``, and the
    forward method returned an undefined name ``output``.  All three restored.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def _A(self, hidden_states):
        """Apply the dense projection and return the result."""
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    "\n    PoolFormer Model transformer with an image classification head on top\n    " , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
    """PoolFormer with an image-classification head (GroupNorm + mean-pool + linear).

    NOTE(review): machine-mangled copy — ``__init__`` and ``forward`` assign to
    throwaway ``a__`` locals while later lines read ``self.poolformer`` /
    ``self.norm`` / ``self.classifier`` / ``outputs`` / ``logits`` / ``loss``,
    so this will NameError at runtime until the original names are restored.
    """
    def __init__( self , __UpperCAmelCase ):
        """Build backbone, final norm and classifier head from the config."""
        super().__init__(__UpperCAmelCase )
        a__ : Optional[Any] = config.num_labels
        a__ : int = PoolFormerModel(__UpperCAmelCase )
        # Final norm
        a__ : Any = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        a__ : Dict = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(__UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _A ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
        """Classify: backbone -> norm -> spatial mean over (-2, -1) -> linear head.

        When labels are given, the problem_type (regression / single-label /
        multi-label) is inferred once and the matching loss is computed.
        """
        a__ : str = return_dict if return_dict is not None else self.config.use_return_dict
        a__ : str = self.poolformer(
            __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , )
        a__ : Optional[Any] = outputs[0]
        a__ : Union[str, Any] = self.classifier(self.norm(__UpperCAmelCase ).mean([-2, -1] ) )
        a__ : List[str] = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    a__ : str = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    a__ : List[str] = "single_label_classification"
                else:
                    a__ : str = "multi_label_classification"
            if self.config.problem_type == "regression":
                a__ : int = MSELoss()
                if self.num_labels == 1:
                    a__ : str = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    a__ : Union[str, Any] = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
            elif self.config.problem_type == "single_label_classification":
                a__ : Any = CrossEntropyLoss()
                a__ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                a__ : Union[str, Any] = BCEWithLogitsLoss()
                a__ : Dict = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
        if not return_dict:
            a__ : List[Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states )
# ---- fragment boundary (non-code separator residue removed) ----
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D (row) array into a column vector of shape (n, 1).

    Name restored from the call sites below; the mangled original named the
    parameter ``UpperCamelCase`` while the body read ``input_array`` (NameError).
    """
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class scatter matrix of *features* (shape: n_features x n_samples).

    labels holds a class id in [0, classes) per sample.  The mangled original
    repeated the parameter name three times (a SyntaxError); signature restored
    from the call site in linear_discriminant_analysis.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i (inline column reshape of the mean).
        centered_data = data - data_mean.reshape(-1, 1)
        if i > 0:
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # First iteration: replace the np.nan placeholder.
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class scatter matrix of *features* (shape: n_features x n_samples).

    Each class contributes its sample count times the outer product of
    (class mean - overall mean).  Signature restored from the call site in
    linear_discriminant_analysis (the mangled original was a SyntaxError).
    """
    general_mean = features.mean(1).reshape(-1, 1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples belonging to class i
        mean_diff = data.mean(1).reshape(-1, 1) - general_mean
        if i > 0:
            covariance_sum += device_data * np.dot(mean_diff, mean_diff.T)
        else:
            # First iteration: replace the np.nan placeholder.
            covariance_sum = device_data * np.dot(mean_diff, mean_diff.T)
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* (shape: n_features x n_samples) onto the top
    *dimensions* principal components and return the projected data.

    Raises AssertionError when the dataset is empty (all-zero / empty array).
    Name restored from the call site in the test below; the mangled original
    also passed an undefined name for ``force`` (fixed to True).
    """
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # eigh returns columns in ascending eigenvalue order; reverse to take
        # the *dimensions* largest components.
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("""Principal Component Analysis computed""")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="""%(message)s""", force=True)
    logging.error("""Dataset empty""")
    raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Fisher LDA projection of *features* (n_features x n_samples) onto
    *dimensions* discriminant directions.

    Raises AssertionError when dimensions >= classes (contract kept as an
    assert because the accompanying tests catch AssertionError) or when the
    dataset is empty.
    """
    assert classes > dimensions
    # Fix: the original tested the truthy bound method ``features.any`` instead
    # of calling it, so the empty-dataset branch was unreachable.
    if features.any():
        # Generalized symmetric eigenproblem S_b v = lambda S_w v.
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("""Linear Discriminant Analysis computed""")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="""%(message)s""", force=True)
    logging.error("""Dataset empty""")
    raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions is not < classes.

    Renamed to ``test_*`` so pytest discovers it; the mangled name shadowed
    every other function in this module.
    """
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA test: expects AssertionError when the output differs from the
    hard-coded expected projection (renamed to ``test_*`` for pytest discovery).
    """
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest
    doctest.testmod()
# ---- fragment boundary (non-code separator residue removed) ----
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module globals below are bound to the single mangled
# name `_lowerCAmelCase`, so each assignment shadows the previous one.  The
# tokenizer class below reads them as `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`,
# which is what they must be renamed back to.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''vocab.txt'''}
_lowerCAmelCase = {
    '''vocab_file''': {
        '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
    },
}
_lowerCAmelCase = {
    '''openbmb/cpm-ant-10b''': 1024,
}
def load_vocab(vocab_file):
    """Load a newline-delimited vocabulary file into an OrderedDict token -> index.

    Name restored from the tokenizer's ``load_vocab(...)`` call site below; the
    mangled def name made that call a NameError.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, """r""", encoding="""utf-8""") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("""\n""")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first tokenizer over a fixed vocabulary.

    Unlike BERT wordpiece there is no '##' continuation marker: every
    vocabulary entry is matched as-is.  Class and parameter names restored from
    the ``WordpieceTokenizer(vocab=..., unk_token=...)`` call site below; the
    mangled ``__init__`` repeated one parameter name (a SyntaxError) and could
    not accept those keywords.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split *token* into the longest vocabulary pieces; a character that
        starts no known piece maps to unk_token.  Over-long tokens collapse to
        a single unk_token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Try the longest substring first, shrinking from the right.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    """CPM-Ant-style tokenizer: jieba coarse segmentation followed by greedy
    wordpiece lookup over an OrderedDict vocabulary.

    NOTE(review): machine-mangled copy.  The ``__init__`` parameter list repeats
    ``__UpperCAmelCase`` (a SyntaxError as written), every public method is
    named ``UpperCAmelCase_`` (each definition shadows the previous one), and
    many assignment targets are ``lowerCAmelCase__`` while later lines read the
    original names (``bod_token``, ``self.encoder``, ``index``, ``vocab_file``).
    Treat the per-method comments below as intent, not verified behaviour.
    """
    __lowercase : Optional[int] = VOCAB_FILES_NAMES
    __lowercase : Any = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
    __lowercase : Tuple = False
    def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase="<d>" ,__UpperCAmelCase="</d>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="</n>" ,__UpperCAmelCase="</_>" ,__UpperCAmelCase="left" ,**__UpperCAmelCase ,) -> Dict:
        # Requires the optional jieba backend for Chinese word segmentation.
        requires_backends(self ,["""jieba"""] )
        super().__init__(
            bod_token=__UpperCAmelCase ,eod_token=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,line_token=__UpperCAmelCase ,space_token=__UpperCAmelCase ,padding_side=__UpperCAmelCase ,**__UpperCAmelCase ,)
        lowerCAmelCase__ : int = bod_token
        lowerCAmelCase__ : Optional[Any] = eod_token
        lowerCAmelCase__ : Union[str, Any] = load_vocab(__UpperCAmelCase )
        # Space and newline get dedicated ids, then are removed from the vocab
        # so the wordpiece matcher never emits them.
        lowerCAmelCase__ : int = self.encoder[space_token]
        lowerCAmelCase__ : Dict = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        # NOTE(review): the lambda parameter was mangled; the body reads ``x``.
        lowerCAmelCase__ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda __UpperCAmelCase : x[1] ) )
        lowerCAmelCase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
        lowerCAmelCase__ : Optional[Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token )
    @property
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        # id of the begin-of-document token
        return self.encoder[self.bod_token]
    @property
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        # id of the end-of-document token
        return self.encoder[self.eod_token]
    @property
    def UpperCAmelCase_ ( self ) -> int:
        # id of the newline token
        return self.encoder["\n"]
    @property
    def UpperCAmelCase_ ( self ) -> int:
        # vocabulary size
        return len(self.encoder )
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # full vocab including added tokens
        return dict(self.encoder ,**self.added_tokens_encoder )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
        # tokenize: jieba segmentation, then wordpiece on each segment
        lowerCAmelCase__ : Dict = []
        for x in jieba.cut(__UpperCAmelCase ,cut_all=__UpperCAmelCase ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(__UpperCAmelCase ) )
        return output_tokens
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
        # decode: drop negative ids and pad/bos/eos before delegating upstream
        lowerCAmelCase__ : List[Any] = [i for i in token_ids if i >= 0]
        lowerCAmelCase__ : Union[str, Any] = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(__UpperCAmelCase ,**__UpperCAmelCase )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        # membership check for a token
        return token in self.encoder
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        # join token pieces back into a string (no separators)
        return "".join(__UpperCAmelCase )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
        # token -> id, falling back to the unk id
        return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
        # id -> token, falling back to the unk token
        return self.decoder.get(__UpperCAmelCase ,self.unk_token )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
        # save_vocabulary: re-insert space/newline ids, then write one token per line
        if os.path.isdir(__UpperCAmelCase ):
            lowerCAmelCase__ : Any = os.path.join(
                __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            lowerCAmelCase__ : Tuple = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        lowerCAmelCase__ : List[Any] = 0
        if " " in self.encoder:
            lowerCAmelCase__ : int = self.encoder[""" """]
            del self.encoder[" "]
        if "\n" in self.encoder:
            lowerCAmelCase__ : List[Any] = self.encoder["""\n"""]
            del self.encoder["\n"]
        lowerCAmelCase__ : Dict = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda __UpperCAmelCase : x[1] ) )
        # NOTE(review): ``vocab_file`` in the warning below was mangled away —
        # the open() target is bound to a throwaway local above.
        with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        """ Please check that the vocabulary is not corrupted!""" )
                    lowerCAmelCase__ : Tuple = token_index
                writer.write(token + """\n""" )
                index += 1
        return (vocab_file,)
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
        # build_inputs_with_special_tokens: prefix each sequence with BOS
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
        # get_special_tokens_mask: 1 marks the BOS positions inserted above
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
        if token_ids_a is not None:
            return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase ))
        return [1] + ([0] * len(__UpperCAmelCase ))
# ---- fragment boundary (non-code separator residue removed) ----
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# NOTE(review): machine-mangled copy of the transformers logging shim.  Every
# module global below is bound to the single name `lowercase` (each assignment
# shadows the previous) and every function to `lowerCamelCase_`, while the
# bodies still read the original names (`_lock`, `_default_handler`,
# `log_levels`, `_default_log_level`, `set_verbosity`, ...).  The module will
# NameError at call time until the original identifiers are restored.
lowercase = threading.Lock()
lowercase = None
lowercase = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
lowercase = logging.WARNING
lowercase = True
def lowerCamelCase_ ( ):
    '''Resolve the default log level, honouring the TRANSFORMERS_VERBOSITY env var.'''
    # NOTE(review): the getenv default was mangled to a self-reference.
    UpperCamelCase__ = os.getenv('''TRANSFORMERS_VERBOSITY''', UpperCamelCase__ )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
    return _default_log_level
def lowerCamelCase_ ( ):
    '''Return the library name (first component of this module's dotted path).'''
    return __name__.split('''.''' )[0]
def lowerCamelCase_ ( ):
    '''Return the library's root logger.'''
    return logging.getLogger(_get_library_name() )
def lowerCamelCase_ ( ):
    '''Attach the default stderr StreamHandler to the library root logger (idempotent).'''
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        UpperCamelCase__ = logging.StreamHandler() # Set sys.stderr as stream.
        UpperCamelCase__ = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        UpperCamelCase__ = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        UpperCamelCase__ = False
def lowerCamelCase_ ( ):
    '''Detach the default handler and reset the root logger to NOTSET.'''
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        UpperCamelCase__ = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        UpperCamelCase__ = None
def lowerCamelCase_ ( ):
    '''Return the name -> level mapping of supported verbosity strings.'''
    return log_levels
def lowerCamelCase_ ( UpperCamelCase__ : Optional[str] = None ):
    '''Return a logger with the given name (library root name when None).'''
    if name is None:
        UpperCamelCase__ = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(UpperCamelCase__ )
def lowerCamelCase_ ( ):
    '''Return the current effective verbosity of the library root logger.'''
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def lowerCamelCase_ ( UpperCamelCase__ : int ):
    '''Set the verbosity of the library root logger.'''
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(UpperCamelCase__ )
# NOTE(review): the next four wrappers originally passed INFO / WARNING /
# DEBUG / ERROR respectively; the mangling replaced each constant with an
# undefined name.
def lowerCamelCase_ ( ):
    '''set_verbosity_info (argument mangled away).'''
    return set_verbosity(UpperCamelCase__ )
def lowerCamelCase_ ( ):
    '''set_verbosity_warning (argument mangled away).'''
    return set_verbosity(UpperCamelCase__ )
def lowerCamelCase_ ( ):
    '''set_verbosity_debug (argument mangled away).'''
    return set_verbosity(UpperCamelCase__ )
def lowerCamelCase_ ( ):
    '''set_verbosity_error (argument mangled away).'''
    return set_verbosity(UpperCamelCase__ )
def lowerCamelCase_ ( ):
    '''Remove the library's default handler from the root logger.'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def lowerCamelCase_ ( ):
    '''Re-attach the library's default handler to the root logger.'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def lowerCamelCase_ ( UpperCamelCase__ : logging.Handler ):
    '''Add a custom handler to the library root logger.'''
    _configure_library_root_logger()
    # NOTE(review): `handler` is the mangled-away original parameter name.
    assert handler is not None
    _get_library_root_logger().addHandler(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : logging.Handler ):
    '''Remove the given handler from the library root logger.'''
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(UpperCamelCase__ )
def lowerCamelCase_ ( ):
    '''Stop library log records from propagating to ancestor loggers.'''
    _configure_library_root_logger()
    UpperCamelCase__ = False
def lowerCamelCase_ ( ):
    '''Allow library log records to propagate to ancestor loggers.'''
    _configure_library_root_logger()
    UpperCamelCase__ = True
def lowerCamelCase_ ( ):
    '''Install an explicit [LEVEL|file:line] timestamped formatter on every root handler.'''
    UpperCamelCase__ = _get_library_root_logger().handlers
    for handler in handlers:
        UpperCamelCase__ = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
        handler.setFormatter(UpperCamelCase__ )
def lowerCamelCase_ ( ):
    '''Reset every root handler's formatter (originally to None).'''
    UpperCamelCase__ = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(UpperCamelCase__ )
def lowerCamelCase_ ( self : Optional[Any], *UpperCamelCase__ : Optional[int], **UpperCamelCase__ : str ):
    '''warning_advice: like Logger.warning, but silenced when
    TRANSFORMERS_NO_ADVISORY_WARNINGS is set.'''
    UpperCamelCase__ = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''', UpperCamelCase__ )
    if no_advisory_warnings:
        return
    self.warning(*UpperCamelCase__, **UpperCamelCase__ )
# NOTE(review): `warning_advice` is the mangled-away name of the function above.
lowercase = warning_advice
# NOTE(review): the cache size argument was mangled (originally lru_cache(None)).
@functools.lru_cache(UpperCamelCase__ )
def lowerCamelCase_ ( self : List[str], *UpperCamelCase__ : Optional[Any], **UpperCamelCase__ : List[Any] ):
    '''warning_once: emit each distinct warning only once (lru_cache-backed).'''
    self.warning(*UpperCamelCase__, **UpperCamelCase__ )
lowercase = warning_once
class EmptyTqdm:
    """Dummy tqdm stand-in that swallows every call when progress bars are
    disabled.

    Name restored from the ``EmptyTqdm(*args, **kwargs)`` call site below; the
    mangled original also repeated ``_a`` as both *args and **kwargs name
    (a SyntaxError as written).
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return a no-op callable for any tqdm method (update, set_description, ...)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Factory proxy: yields a real tqdm when progress bars are globally
    enabled (module flag ``_tqdm_active``), otherwise an EmptyTqdm.

    Name restored from the ``_tqdm_cls()`` instantiation below; method names
    restored from tqdm's set_lock/get_lock API (the mangled original named both
    ``A_``, so one shadowed the other, and repeated parameter names were a
    SyntaxError).
    """

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# NOTE(review): relies on a class named `_tqdm_cls` being defined above; the
# functions below are all mangled to `lowerCamelCase_` (each shadows the
# previous) and assign a throwaway local instead of the `_tqdm_active` global
# they declare.
lowercase = _tqdm_cls()
def lowerCamelCase_ ( ):
    '''Return True when tqdm progress bars are globally enabled.'''
    global _tqdm_active
    return bool(_tqdm_active )
def lowerCamelCase_ ( ):
    '''Globally enable tqdm progress bars, here and in huggingface_hub.'''
    global _tqdm_active
    UpperCamelCase__ = True
    hf_hub_utils.enable_progress_bars()
def lowerCamelCase_ ( ):
    '''Globally disable tqdm progress bars, here and in huggingface_hub.'''
    global _tqdm_active
    UpperCamelCase__ = False
    hf_hub_utils.disable_progress_bars()
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowercase = ["""text""", """image""", """audio"""]
def create_inputs(input_types):
    """Build one dummy input per declared modality: "text" -> a literal string,
    "image" -> a 512x512 fixture image, "audio" -> a ones tensor; a nested list
    of modalities recurses into a nested list of inputs.

    Name restored from the recursive call and from the mixin's
    ``create_inputs(self.tool.inputs)`` call site; the mangled original also
    tested ``isinstance(x, x)`` for the nested-list branch.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs):
    """Map each output object back to its modality name: str/AgentText ->
    "text", PIL image/AgentImage -> "image", tensor/AgentAudio -> "audio".

    Name restored from the mixin's ``output_types(...)`` call site below.
    """
    types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return types
@is_tool_test
class __lowercase :
    '''Mixin exercising a ``self.tool`` fixture: validates declared input/output
    modalities, call results, descriptions, and agent-type round-tripping.

    NOTE(review): machine-mangled copy — all five test methods are named ``A_``
    (each definition shadows the previous), locals are bound to throwaway
    ``UpperCamelCase__`` names while later lines read the originals, and the
    helpers are called by their original names ``create_inputs`` /
    ``output_types``.  Uses unittest assert methods on self.
    '''
    def A_ ( self : List[str] ):
        # Declared input/output modalities must all be authorized types.
        self.assertTrue(hasattr(self.tool , '''inputs''' ) )
        self.assertTrue(hasattr(self.tool , '''outputs''' ) )
        UpperCamelCase__ = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , _a ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        UpperCamelCase__ = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def A_ ( self : str ):
        # Calling the tool on dummy inputs yields outputs of the declared types.
        UpperCamelCase__ = create_inputs(self.tool.inputs )
        UpperCamelCase__ = self.tool(*_a )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            UpperCamelCase__ = [outputs]
        self.assertListEqual(output_types(_a ) , self.tool.outputs )
    def A_ ( self : List[str] ):
        # Tools must carry a description and a default checkpoint.
        self.assertTrue(hasattr(self.tool , '''description''' ) )
        self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
        self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
    def A_ ( self : List[str] ):
        # Every output maps onto the corresponding agent type.
        UpperCamelCase__ = create_inputs(self.tool.inputs )
        UpperCamelCase__ = self.tool(*_a )
        if not isinstance(_a , _a ):
            UpperCamelCase__ = [outputs]
        self.assertEqual(len(_a ) , len(self.tool.outputs ) )
        for output, output_type in zip(_a , self.tool.outputs ):
            UpperCamelCase__ = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(_a , _a ) )
    def A_ ( self : Optional[int] ):
        # The tool must also accept agent-type-wrapped inputs without error.
        UpperCamelCase__ = create_inputs(self.tool.inputs )
        UpperCamelCase__ = []
        for _input, input_type in zip(_a , self.tool.inputs ):
            if isinstance(_a , _a ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        UpperCamelCase__ = self.tool(*_a )
        if not isinstance(_a , _a ):
            UpperCamelCase__ = [outputs]
        self.assertEqual(len(_a ) , len(self.tool.outputs ) )
# ---- fragment boundary (non-code separator residue removed) ----
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum contiguous subarray of arr[low:high + 1].

    Returns (start_index, end_index, sum); an empty array yields (None, None, 0).
    The mangled original repeated the parameter name (a SyntaxError) and lost
    the triple tuple-unpacking of the recursive results.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    # Best subarray wholly in the left half, wholly in the right half, and the
    # best one crossing the midpoint; the answer is the max of the three.
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray spanning the split point: extend left from mid and right
    from mid + 1, keeping the maximum running sum on each side."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    """Time one max_subarray run over `input_size` random positive integers.

    Name restored from the benchmark loop below; the mangled original lost the
    local names read after assignment (`start`, `end`).
    """
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Benchmark max_subarray across growing input sizes, print a table and
    plot runtime versus input size with matplotlib.

    NOTE(review): nothing in this module calls this function, so the restored
    name is a best guess; it must not collide with the functions above.
    """
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
# ---- fragment boundary (non-code separator residue removed) ----
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count 1-bits with Brian Kernighan's trick: ``n &= n - 1`` clears the
    lowest set bit, so the loop runs once per set bit rather than per bit.

    Raises ValueError for negative or non-int input.  The mangled original
    tested ``isinstance(x, x)``, which can never perform the intended check.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""")
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ---- fragment boundary (non-code separator residue removed) ----
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A_ :
    """Builds a tiny OpenAI-GPT config plus dummy inputs and runs per-architecture shape checks.

    NOTE(review): this file looks machine-mangled (every local was renamed to ``lowercase`` and the
    ``self.`` targets were lost). Names below are restored from the call sites in the test-case
    class further down in this file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # The tests use the last vocab id as the pad token (passed to OpenAIGPTConfig below).
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, head_mask, token_type_ids, *labels) for the checks below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        # Exercise all three call signatures; only the last result is shape-checked.
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """End-to-end checks for the OpenAI-GPT model family using tiny random weights.

    NOTE(review): the base classes were mangled to an unresolved placeholder; the standard
    transformers mixins are restored here — confirm against the file's (off-screen) imports.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # Special case for the DoubleHeads model: its label tensors have an extra num_choices axis.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        # NOTE(review): the tester class above was mangled to ``A_``; ``OpenAIGPTModelTester`` is
        # the name the original (unmangled) file used.
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained ``openai-gpt`` checkpoint."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 84 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# NOTE(review): both module constants were mangled to the same name, so the second assignment
# clobbered the logger that the config class below uses. Map name restored per transformers
# convention — confirm against upstream.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class A_ ( PretrainedConfig ):
    """Configuration class for the Conditional DETR model.

    NOTE(review): the mangling dropped every ``self.`` target; attribute names are restored from
    the constructor arguments and the accesses in ``to_dict``/the properties below.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # The two backbone mechanisms are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the proper config object from a plain dict (e.g. after deserialization).
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class A_ ( OnnxConfig ):
    """ONNX export configuration for Conditional DETR: input spec, tolerance and opset."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes: batch is always dynamic; pixel_values also has dynamic spatial dims.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model against PyTorch.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 84 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic torch kernels so the hard-coded expected slices below are reproducible.
enable_full_determinism()
class UpperCamelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``StableDiffusionInpaintPipeline``.

    NOTE(review): bases and attribute names were mangled; restored from the mixins imported at the
    top of this file and the standard diffusers test layout.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build the smallest possible set of pipeline components (seeded for determinism)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-0_5,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a deterministic prompt/image/mask kwargs dict for the pipeline call."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
    """Slow GPU integration tests against the ``stabilityai/stable-diffusion-2-inpainting`` checkpoint."""

    def tearDown(self):
        # Free VRAM between tests so the memory assertions below are meaningful.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 tolerance is much looser than the fp32 test above.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 719 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
# Module-level pytest "integration" marker. NOTE(review): the mangled name suggests it was
# originally applied to the whole module (e.g. as ``pytestmark``) — confirm against upstream.
UpperCAmelCase_ : int = pytest.mark.integration
@require_faiss
class UpperCamelCase ( TestCase ):
    """Tests for ``Dataset`` index helpers (FAISS and mocked Elasticsearch)."""

    def _create_dummy_dataset(self):
        """A 30-row dataset with filenames ``my_name-train_0`` .. ``my_name-train_29``."""
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        # Row i gets the vector i * ones(5), so row 29 maximizes inner product with ones(5).
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        # Querying a dropped index must raise MissingIndex.
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class UpperCamelCase ( TestCase ):
    """Unit tests for the low-level ``FaissIndex`` wrapper."""

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        # a 2-D array is not a valid single query
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        # a 1-D array is not a valid batch
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # string_factory and custom_index are mutually exclusive
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def UpperCamelCase(mockfs):
    """Round-trip a FaissIndex through a mocked fsspec filesystem (``mockfs`` pytest fixture)."""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class UpperCamelCase ( TestCase ):
    """Unit tests for ``ElasticSearchIndex`` with a fully mocked Elasticsearch client."""

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 232 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _lowerCAmelCase ( UpperCamelCase__ ):
def __init__( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=0 ) -> List[str]:
SCREAMING_SNAKE_CASE : Optional[int] =1.0 if scale is None else scale
SCREAMING_SNAKE_CASE : List[Any] =0.0 if loc is None else loc
super().__init__(snake_case_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=snake_case_ )] )
@property
def __a ( self ) -> Any:
return self.base_dist.mean * self.scale + self.loc
@property
def __a ( self ) -> str:
return self.base_dist.variance * self.scale**2
@property
def __a ( self ) -> Union[str, Any]:
return self.variance.sqrt()
class _lowerCAmelCase ( nn.Module ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> None:
super().__init__(**snake_case_ )
SCREAMING_SNAKE_CASE : List[Any] =args_dim
SCREAMING_SNAKE_CASE : Any =nn.ModuleList([nn.Linear(snake_case_ , snake_case_ ) for dim in args_dim.values()] )
SCREAMING_SNAKE_CASE : Dict =domain_map
def __a ( self , snake_case_ ) -> Tuple[torch.Tensor]:
SCREAMING_SNAKE_CASE : Dict =[proj(snake_case_ ) for proj in self.proj]
return self.domain_map(*snake_case_ )
class _lowerCAmelCase ( nn.Module ):
def __init__( self , snake_case_ ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE : Tuple =function
def __a ( self , snake_case_ , *snake_case_ ) -> Dict:
return self.function(snake_case_ , *snake_case_ )
class _lowerCAmelCase :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self , snake_case_ = 1 ) -> None:
SCREAMING_SNAKE_CASE : Dict =dim
SCREAMING_SNAKE_CASE : Dict ={k: dim * self.args_dim[k] for k in self.args_dim}
def __a ( self , snake_case_ ) -> Optional[Any]:
if self.dim == 1:
return self.distribution_class(*snake_case_ )
else:
return Independent(self.distribution_class(*snake_case_ ) , 1 )
def __a ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Distribution:
SCREAMING_SNAKE_CASE : Optional[int] =self._base_distribution(snake_case_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(snake_case_ , loc=snake_case_ , scale=snake_case_ , event_dim=self.event_dim )
@property
def __a ( self ) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def __a ( self ) -> int:
return len(self.event_shape )
@property
def __a ( self ) -> float:
return 0.0
def __a ( self , snake_case_ ) -> nn.Module:
return ParameterProjection(
in_features=snake_case_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __a ( self , *snake_case_ ) -> int:
raise NotImplementedError()
@staticmethod
def __a ( snake_case_ ) -> torch.Tensor:
return (x + torch.sqrt(torch.square(snake_case_ ) + 4.0 )) / 2.0
class _lowerCAmelCase ( UpperCamelCase__ ):
    """DistributionOutput for Student's t (arguments: df, loc, scale).

    NOTE(review): the base-class name is an unresolved mangling artifact; it should be the
    DistributionOutput base class defined above — confirm against upstream.
    """

    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # scale must be strictly positive; df is shifted above 2.0.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class _lowerCAmelCase ( UpperCamelCase__ ):
    """DistributionOutput for the Normal distribution (arguments: loc, scale).

    NOTE(review): the base-class name is an unresolved mangling artifact; it should be the
    DistributionOutput base class defined above — confirm against upstream.
    """

    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # scale must be strictly positive.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class _lowerCAmelCase ( UpperCamelCase__ ):
    """DistributionOutput for the Negative Binomial distribution (arguments: total_count, logits).

    NOTE(review): the base-class name is an unresolved mangling artifact; it should be the
    DistributionOutput base class defined above — confirm against upstream.
    """

    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        # total_count must be positive; logits are unconstrained.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 258 |
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """
    Count ordered sequences of items from ``array`` (with repetition) summing to ``target``.

    Naive exponential recursion. ``n`` (the number of items, ``len(array)``) is kept for
    interface compatibility with the bottom-up variant but is unused here.

    NOTE(review): the mangled version declared three parameters all named ``__a`` — a
    SyntaxError — and gave all three variants the same function name; names restored from
    the ``__main__`` call site.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """
    Same count as :func:`combination_sum_iv`, memoised over sub-targets with a DP array.

    ``dp_array[t]`` caches the number of ordered combinations summing to ``t`` (-1 = unknown).
    ``n`` is kept for interface compatibility and is unused here.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """
    Iterative DP variant: ``dp_array[i]`` holds the number of ordered combinations summing to ``i``.

    ``n`` is the number of usable items, i.e. ``len(array)``.

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: pick nothing

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example: ordered picks from [1, 2, 5] summing to 5.
    # NOTE(review): the mangled version assigned all three values to one name (``_A``),
    # leaving n/array/target unbound at the call below.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 258 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both module constants were mangled to the same name, clobbering the logger.
# Map name restored per transformers convention — confirm against upstream.
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class for Swin Transformer V2.

    NOTE(review): the mangling dropped the ``self.`` targets and duplicated the class-attribute
    names; restored from the constructor arguments and the attribute_map values.
    """

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 713 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase__ = logging.get_logger(__name__)
# Map from model identifier to the URL of its hosted config file.
# NOTE(review): this rebinds `lowercase__`, clobbering the logger above —
# the two module constants appear to have been collapsed onto one name; verify.
lowercase__ = {
    '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a GPT-NeoX model.

    Holds architecture hyperparameters (vocab/hidden sizes, rotary-embedding
    settings, dropout rates) plus optional RoPE scaling, validated on init.
    The original base class name was undefined; `PretrainedConfig` is the
    class imported at the top of this module.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        # NOTE: the original signature repeated one parameter name for every
        # argument (a SyntaxError); names restored from the assignment order below.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!" )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": "linear"|"dynamic", "factor": float > 1}.

        Renamed from the mangled `_lowercase` to match the call in `__init__`.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f'''got {self.rope_scaling}''' )
        # `None` defaults: absence of either key is reported below.
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 420 | 0 |
'''simple docstring'''
# Lazy-import scaffolding for the BioGPT package: names are only resolved
# when first accessed, and torch-dependent symbols are registered only when
# torch is installed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Mapping from submodule name to the public names it provides.
UpperCamelCase__ : int = {
    '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
    '''tokenization_biogpt''': ['''BioGptTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: silently skip registering the modeling symbols.
    pass
else:
    # NOTE(review): the original almost certainly assigned this list into the
    # import-structure dict (e.g. _import_structure["modeling_biogpt"]); here it
    # rebinds an unrelated module name, so the modeling names are never
    # registered with the lazy module — verify against upstream.
    UpperCamelCase__ : Optional[int] = [
        '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BioGptForCausalLM''',
        '''BioGptForTokenClassification''',
        '''BioGptForSequenceClassification''',
        '''BioGptModel''',
        '''BioGptPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    # NOTE(review): `_import_structure` is undefined here (the dict above was
    # bound to a different name) — confirm against the upstream __init__.
    UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
# Emit INFO-level logs during conversion so progress is visible.
logging.set_verbosity_info()
# Module-level logger for this conversion script.
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSat sequence-classification model and copy the S3PRL
    downstream head weights into it.

    Renamed from the mangled `lowerCAmelCase_` to match the call site in the
    checkpoint-conversion entry point. The original bound each weight to a
    throwaway local instead of writing it into the model.

    Args:
        base_model_name: HF hub id or path of the pretrained base model.
        hf_config: UniSpeechSatConfig for the target model.
        downstream_dict: State dict of the S3PRL downstream head.

    Returns:
        The model with projector/classifier weights overwritten.
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSat audio-frame-classification (diarization) model and
    copy the S3PRL downstream head weights into it.

    Renamed from the mangled `lowerCAmelCase_` to match the call site in the
    checkpoint-conversion entry point. The original bound each weight to a
    throwaway local instead of writing it into the model.
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSat x-vector (speaker verification) model and copy the
    S3PRL downstream weights into it.

    Renamed from the mangled `lowerCAmelCase_` to match the call site in the
    checkpoint-conversion entry point. The original bound each weight to a
    throwaway local instead of writing it into the model; target attributes
    restored per the upstream conversion script — verify shapes on a real
    checkpoint.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL downstream checkpoint into a Hugging Face model dump.

    Renamed from the mangled `lowerCAmelCase_` to match the call in the
    `__main__` block; parameter names restored from the argparse flags.

    Args:
        base_model_name: HF hub id or path of the pretrained base model.
        config_path: Path to the HF classifier config.
        checkpoint_path: Path to the S3PRL checkpoint.
        model_dump_path: Output directory for the converted model.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    # Dispatch on the architecture declared in the config.
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        # assumes the weighted-layer-sum weights live on `layer_weights` —
        # restored from the upstream script; TODO confirm.
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # The original bound the parser and parsed args to unrelated names and then
    # referenced `parser`/`args`; restored the names the statements below use.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the TPU launcher's command line.

    Renamed from the mangled `_UpperCAmelCase` to match the call in `main`.
    The undefined `_A` placeholders are restored to the obvious argparse
    values: an int core count, a str script path, and REMAINDER for the
    pass-through arguments (imported at the top of this module).

    Returns:
        argparse.Namespace with `num_cores`, `training_script`,
        `training_script_args`.
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''', type=str, help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ), )
    # rest from the training program
    parser.add_argument('''training_script_args''', nargs=REMAINDER )
    return parser.parse_args()
def main():
    """Launch the training script across TPU cores via xmp.spawn.

    Renamed from the mangled `_UpperCAmelCase` so the `__main__` guard's
    `main()` call resolves; depends on the sibling `parse_args` helper.
    """
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the spawned script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 719 | '''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCAmelCase(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for StableDiffusionPanoramaPipeline with tiny dummy components.

    The original base-class names were undefined; the mixins restored here are
    the ones imported at the top of this module. The class attributes and
    methods were all mangled onto single names (`__lowercase` / `__A`), so
    later definitions clobbered earlier ones; names are restored from the
    tester-mixin contract and the `self.get_dummy_*` calls within this class.
    """

    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny model components so the pipeline runs quickly on CPU."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard call kwargs for the dummy pipeline."""
        generator = torch.manual_seed(seed)
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            '''height''': None,
            '''width''': None,
            '''num_inference_steps''': 1,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_consistent(self):
        # Method name must match the mixin hook being overridden/extended.
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25E-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = '''french fries'''
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_panorama_euler(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''')
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_panorama_pndm(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration tests for StableDiffusionPanoramaPipeline.

    The original class shared its (mangled) name with the fast-test class
    above, so one of them shadowed the other; renamed to keep both
    collectible. All methods were mangled onto `__A` (later defs clobbered
    earlier ones); `tearDown`/`get_inputs` names are grounded by the
    `super().tearDown()` and `self.get_inputs()` calls in the bodies, test
    names are descriptive restorations.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Standard call kwargs for full-size generation."""
        generator = torch.manual_seed(seed)
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ])
        assert np.abs(expected_slice - image_slice).max() < 1E-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-base''', safety_checker=None)
        # NOTE(review): the original discarded the LMS scheduler into a
        # throwaway local; it is re-attached to the pipeline here.
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ])
        assert np.abs(expected_slice - image_slice).max() < 1E-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step, timestep, latents) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2

        callback_fn.has_been_called = False

        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 174 | 0 |
# Adler-32 modulus: 65521, the largest prime below 2**16.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 6_55_21
def lowercase(plain_text) -> int:
    """Compute the Adler-32 checksum of `plain_text`.

    The original body referenced undefined names (`plain_text`, `MOD_ADLER`);
    the parameter is renamed to match and the modulus is defined locally so
    the function is self-contained.

    Args:
        plain_text: The string to checksum.

    Returns:
        int: the 32-bit Adler checksum (high 16 bits = b, low 16 bits = a).
    """
    MOD_ADLER = 65521  # largest prime below 2**16
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 205 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Emit INFO-level logs during conversion.
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# NOTE(review): all four aliases below are bound to the same throwaway name;
# the upstream script instead patches `data_utils.Vocab`/`data_utils.Corpus`
# and registers `data_utils` in sys.modules so legacy pickles resolve —
# verify against the original conversion script before relying on this.
__A = data_utils.TransfoXLTokenizer
__A = data_utils.TransfoXLCorpus
__A = data_utils
__A = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TF checkpoint and/or pickled corpus to PyTorch.

    Renamed from the mangled `lowerCamelCase_` to match the call in the
    `__main__` block; the original signature repeated one parameter name four
    times (a SyntaxError) and the body referenced that single name everywhere,
    so the distinct names are restored from the argparse flags.

    Args:
        tf_checkpoint_path: Optional TF checkpoint to convert.
        transfo_xl_config_file: Optional config JSON ('' -> default config).
        pytorch_dump_folder_path: Output folder for model/dataset/vocab.
        transfo_xl_dataset_file: Optional pre-processed corpus pickle.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")

        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # The original bound the parser and parsed args to a throwaway name and
    # then referenced `parser`/`args`; restored the names the statements use.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 469 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCAmelCase(PipelineTool):
    """Agent tool: answer a question about a document image with a Donut model.

    The original base class name was undefined; `PipelineTool` is the class
    imported at the top of this module. The class attributes and methods were
    mangled onto single names (`a` / `_A`), so they clobbered each other;
    restored to the PipelineTool attribute/hook names (encode/forward/decode).
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Donut preprocessing needs PIL; fail fast with a clear message.
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''')
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build model inputs: tokenized DocVQA task prompt + pixel values."""
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='''pt''').input_ids
        pixel_values = self.pre_processor(document, return_tensors='''pt''').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run constrained greedy generation on the encoded inputs."""
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device), decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        """Decode the generated sequence and extract the answer field."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '''''')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '''''')
        sequence = re.sub(R'''<.*?>''', '''''', sequence, count=1).strip()  # remove first task start token
        # Donut processors expose `token2json`; the original `tokenajson` looks
        # like a mangled spelling of that method.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 346 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse command-line options for the quantized Stable Diffusion demo.

    Renamed from the mangled `__snake_case` to match the `parse_args()` call
    in the script body below; the original bound the parsed namespace to a
    throwaway local and returned an undefined name.

    Returns:
        argparse.Namespace with model path, caption, image count, seed, cuda id.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''', '''--pretrained_model_name_or_path''', type=str, default=None, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models.''', )
    parser.add_argument(
        '''-c''', '''--caption''', type=str, default='''robotic cat with wings''', help='''Text used to generate images.''', )
    parser.add_argument(
        '''-n''', '''--images_num''', type=int, default=4, help='''How much images to generate.''', )
    parser.add_argument(
        '''-s''', '''--seed''', type=int, default=42, help='''Seed for random process.''', )
    parser.add_argument(
        '''-ci''', '''--cuda_id''', type=int, default=0, help='''cuda_id.''', )
    return parser.parse_args()
def image_grid(imgs, rows, cols):
    """Paste `rows * cols` equally-sized PIL images into a single grid image.

    Renamed from the mangled `__snake_case` to match the call in
    `generate_images`; locals restored (`w`, `h`, `grid`) since the original
    bound them to a throwaway name and then referenced them.

    Args:
        imgs: Sequence of PIL images, all the same size.
        rows: Number of grid rows.
        cols: Number of grid columns.

    Returns:
        A new PIL image containing the grid.
    """
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')

    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # Fill left-to-right, top-to-bottom.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Generate images with `pipeline` and assemble them into a square-ish grid.

    Renamed from the mangled `__snake_case`; the parameter names are grounded
    by the keyword-argument call at the bottom of this script
    (`generate_images(pipeline, prompt=..., num_images_per_prompt=..., seed=...)`).

    Returns:
        Tuple of (grid image, list of individual images).
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# Script body: load the Stable Diffusion components, optionally swap in the
# neural-compressor-quantized UNet, generate images and save them.
# The original bound every module-level value to the same throwaway name and
# then referenced `args`/`tokenizer`/`unet`/`pipeline`/`dirname`; the names
# are restored from those uses.
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: return images unchanged, flag nothing.
# NOTE(review): restored as an assignment onto the pipeline per the lambda's
# (images, clip_input) signature — confirm against the upstream example.
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    # A quantized UNet checkpoint exists: load it and swap it into the pipeline.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 346 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

# NOTE(review): these module constants were all assigned to one placeholder
# name while the tokenizer class below reads `logger`, `VOCAB_FILES_NAMES`,
# etc. — real names restored.
logger = logging.get_logger(__name__)

# SentencePiece marker that prefixes word-initial pieces.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class _lowerCamelCase(PreTrainedTokenizer):
    """SentencePiece tokenizer (Reformer "crime-and-punishment" vocabulary).

    Wraps a ``spm.SentencePieceProcessor`` loaded from ``vocab_file`` and
    exposes the standard ``PreTrainedTokenizer`` hooks: tokenization,
    token<->id conversion, detokenization and vocabulary saving.

    NOTE(review): reconstructed from a mangled dump — duplicate placeholder
    parameter names (a SyntaxError) and colliding method names are restored
    from the surviving reads; the undefined base class ``_a`` is replaced by
    the imported ``PreTrainedTokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model at *vocab_file* and register special tokens."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        # Size of the raw SentencePiece vocabulary (without added tokens).
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """Map every token string to its id, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # reload it from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* into SentencePiece sub-token strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) using the SentencePiece vocab."""
        # NOTE(review): an out-of-range index raises UnboundLocalError here;
        # behavior kept as in the upstream implementation.
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Detokenize: decode runs of regular pieces, splicing special tokens back verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Original file is gone (e.g. only the in-memory model survives):
            # write the serialized model proto instead of copying.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 89 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Tests for ``TvltProcessor`` (image processor + audio feature extractor).

    NOTE(review): reconstructed — the dump renamed every local to one
    placeholder and gave every method the same name (so only the last one
    survived); names are restored from the reads.
    """

    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # Round-trip through save_pretrained/from_pretrained preserves components.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        # Audio routed through the processor must match the feature extractor alone.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        # Images routed through the processor must match the image processor alone.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 2_24, 2_24])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])
        images = np.ones([3, 2_24, 2_24])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 276 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): the dump assigned this dict to a placeholder while the
# _LazyModule call below reads `_import_structure`, and it discarded the
# lazy module instead of installing it — both restored.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE() -> None:
    """Entry point for the ``diffusers-cli`` command-line tool.

    Builds the argument parser, registers subcommands, then dispatches to the
    selected command's ``run()``. Exits with status 1 (after printing help)
    when no subcommand was given.
    """
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    # NOTE(review): the guard previously called an undefined name `main`.
    __SCREAMING_SNAKE_CASE()
| 688 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _snake_case ( __snake_case ):
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase_:
    """Exponential Moving Average (EMA) of a set of model parameters.

    Keeps ``shadow_params`` — a decayed copy of the tracked parameters — and
    supports a warmup decay schedule, (de)serialization via ``state_dict`` /
    ``load_state_dict``, checkpointing via ``save_pretrained`` /
    ``from_pretrained``, and temporary weight swapping via ``store`` /
    ``copy_to`` / ``restore``.

    NOTE(review): reconstructed from a mangled dump in which every parameter
    shared one placeholder name (a SyntaxError) and every method shared one
    name; real names are restored from the surviving reads.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        """
        Args:
            parameters: parameters to track.
            decay: maximum EMA decay factor.
            min_decay: lower bound on the decay factor.
            update_after_step: optimizer steps to skip before EMA starts.
            use_ema_warmup: ramp the decay as ``1 - (1 + step/inv_gamma) ** -power``.
            inv_gamma, power: warmup-schedule hyper-parameters.
            model_cls, model_config: enable ``save_pretrained``/``from_pretrained``.
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls):
        """Rebuild an EMA wrapper from a checkpoint written by ``save_pretrained``."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Materialize the EMA weights into a fresh model instance and save it."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Move the shadow parameters towards the current parameters by one EMA step."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        # NOTE(review): restored `is_deepspeed_zero3_enabled` — the dump's
        # `is_deepspeed_zeroa_enabled` does not exist in transformers.
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Under ZeRO-3 the full parameter must be gathered before reading it.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    # Non-trainable params are mirrored verbatim.
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the EMA weights into *parameters*."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move (and optionally cast) the shadow parameters; dtype only applies to floating tensors."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the EMA state as a plain dict (mirrors ``torch.optim.Optimizer.state_dict``)."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily save the current parameters so ``restore`` can bring them back."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters saved by ``store``."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load and validate an EMA state produced by ``state_dict``.

        Raises:
            ValueError: on any out-of-range or wrongly-typed field.
        """
        # Deep-copy so the caller's dict is never mutated.
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece word-boundary marker, referenced by the tokenizer tests below.
# NOTE(review): both constants were assigned to the same placeholder name
# while the test class reads `SPIECE_UNDERLINE` and `SAMPLE_VOCAB`.
SPIECE_UNDERLINE = "▁"

# Path to the small fixture SentencePiece model used by the fast unit tests.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for ``BertGenerationTokenizer``.

    NOTE(review): reconstructed from a mangled dump — the base class, all
    test-method names and most locals had been replaced by placeholders, so
    unittest discovery found nothing; names restored from the reads. The
    padded rows of the integration-test literal are rebuilt from the
    original run lengths (rows padded to 94 tokens).
    """

    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` maps to id 1 and back."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        # Out-of-vocabulary pieces ("9", "é") come back as <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [1_8536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # All rows are padded to the longest sequence (94 tokens); the
        # attention mask marks real tokens with 1 and padding with 0.
        expected_encoding = {
            "input_ids": [
                [3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266,
                 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112,
                 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962,
                 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518,
                 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391,
                 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114],
                [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013,
                 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114]
                + [0] * 63,
                [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114] + [0] * 84,
            ],
            "attention_mask": [
                [1] * 94,
                [1] * 31 + [0] * 63,
                [1] * 10 + [0] * 84,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 10 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowercase_(__snake_case: Any) -> Any:
    """Return *__snake_case* unchanged when it is already iterable; otherwise duplicate it into a 2-tuple.

    (The usual ``to_2tuple`` helper for image/patch sizes.)

    NOTE(review): the original returned an undefined name ``x``; restored to
    return the parameter itself.
    """
    if isinstance(__snake_case, collections.abc.Iterable):
        return __snake_case
    return (__snake_case, __snake_case)
@require_flax
class _snake_case :
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> str:
pass
def lowerCAmelCase_ ( self ) -> str:
pass
def lowerCAmelCase_ ( self ) -> List[str]:
pass
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Tuple:
snake_case__ :Optional[int] = np.abs((a - b) ).max()
self.assertLessEqual(UpperCamelCase ,UpperCamelCase ,f'Difference between torch and flax is {diff} (>= {tol}).' )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> Dict:
snake_case__ :Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase ,UpperCamelCase )
snake_case__ :List[str] = FlaxVisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :Dict = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
self.assertEqual(output["text_embeds"].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape ,(pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> List[Any]:
snake_case__ , snake_case__ :Any = self.get_vision_text_model(UpperCamelCase ,UpperCamelCase )
snake_case__ :Optional[int] = {"vision_model": vision_model, "text_model": text_model}
snake_case__ :Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase )
snake_case__ :str = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
self.assertEqual(output["text_embeds"].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> Union[str, Any]:
snake_case__ , snake_case__ :List[Any] = self.get_vision_text_model(UpperCamelCase ,UpperCamelCase )
snake_case__ :Optional[int] = {"vision_model": vision_model, "text_model": text_model}
snake_case__ :Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase )
snake_case__ :Any = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
snake_case__ :Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase )
snake_case__ :Any = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase )
snake_case__ :Any = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
snake_case__ :List[str] = after_output[0]
snake_case__ :Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase ,1E-3 )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> int:
snake_case__ , snake_case__ :List[Any] = self.get_vision_text_model(UpperCamelCase ,UpperCamelCase )
snake_case__ :Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
snake_case__ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase )
snake_case__ :int = model(
input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase ,output_attentions=UpperCamelCase )
snake_case__ :Tuple = output.vision_model_output.attentions
self.assertEqual(len(UpperCamelCase ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ :Optional[Any] = to_atuple(vision_model.config.image_size )
snake_case__ :int = to_atuple(vision_model.config.patch_size )
snake_case__ :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ :List[str] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ :Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(UpperCamelCase ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
pt_model.to(UpperCamelCase )
pt_model.eval()
# prepare inputs
snake_case__ :Optional[int] = inputs_dict
snake_case__ :List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
snake_case__ :str = pt_model(**UpperCamelCase ).to_tuple()
snake_case__ :Optional[Any] = fx_model(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ,"Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase ,pt_output.numpy() ,4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase )
snake_case__ :Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase ,from_pt=UpperCamelCase )
snake_case__ :str = fx_model_loaded(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ,"Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase ,pt_output.numpy() ,4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase )
snake_case__ :List[Any] = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase ,from_flax=UpperCamelCase )
pt_model_loaded.to(UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
snake_case__ :Tuple = pt_model_loaded(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ,"Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
self.assert_almost_equals(UpperCamelCase ,pt_output_loaded.numpy() ,4E-2 )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase ,UpperCamelCase )
snake_case__ :str = VisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :int = FlaxVisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,UpperCamelCase )
snake_case__ :int = fx_state
self.check_pt_flax_equivalence(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = VisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :Union[str, Any] = FlaxVisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :List[Any] = load_flax_weights_in_pytorch_model(UpperCamelCase ,fx_model.params )
self.check_pt_flax_equivalence(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
    def lowerCAmelCase_ ( self ) -> int:
        # Smoke test: build models from pretrained configs and run the shared checks.
        # NOTE(review): the result is bound to `snake_case__` but unpacked via
        # `**UpperCamelCase` — mangled rename; verify.
        snake_case__ :Optional[Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**UpperCamelCase )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Exercise building the dual encoder from two pretrained towers.
        snake_case__ :Union[str, Any] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase )
    def lowerCAmelCase_ ( self ) -> List[str]:
        # Round-trip save/load check on freshly prepared config and inputs.
        snake_case__ :Optional[int] = self.prepare_config_and_inputs()
        self.check_save_load(**UpperCamelCase )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Check that attention outputs are produced by both towers.
        snake_case__ :List[str] = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**UpperCamelCase )
    @is_pt_flax_cross_test
    def lowerCAmelCase_ ( self ) -> int:
        # Cross-framework test: split configs out of the inputs dict, then verify
        # both weight-porting directions (PT->Flax and Flax->PT).
        # NOTE(review): `config_inputs_dict` is referenced but assignments go to
        # `snake_case__` — mangled rename; verify.
        snake_case__ :List[Any] = self.prepare_config_and_inputs()
        snake_case__ :Optional[int] = config_inputs_dict.pop("vision_config" )
        snake_case__ :Dict = config_inputs_dict.pop("text_config" )
        snake_case__ :Optional[Any] = config_inputs_dict
        self.check_equivalence_pt_to_flax(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
        self.check_equivalence_flax_to_pt(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
    @slow
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Save the pretrained model, reload it, and check outputs are unchanged.
        snake_case__ , snake_case__ :Dict = self.get_pretrained_model_and_inputs()
        snake_case__ :List[Any] = model_a(**UpperCamelCase )
        snake_case__ :Optional[int] = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(UpperCamelCase )
            snake_case__ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase )
            snake_case__ :List[str] = model_a(**UpperCamelCase )
            snake_case__ :Tuple = after_outputs[0]
            # NOTE(review): `out_a - out_a` is identically zero, so this assertion
            # can never fail — the two operands were presumably distinct
            # (out_1 / out_2) before a rename; verify.
            snake_case__ :Optional[int] = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(UpperCamelCase ,1E-5 )
@require_flax
class _snake_case ( _A , unittest.TestCase ):
    """ViT + BERT flavour of the Flax vision-text dual-encoder test suite."""

    def lowerCAmelCase_ ( self ) -> List[str]:
        # Load a tiny ViT/BERT dual encoder (both halves converted from PyTorch)
        # and build matching dummy pixel/text inputs.
        snake_case__ :Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" ,"hf-internal-testing/tiny-bert" ,vision_from_pt=UpperCamelCase ,text_from_pt=UpperCamelCase ,)
        snake_case__ :Union[str, Any] = 13
        snake_case__ :List[Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        snake_case__ :Union[str, Any] = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
        snake_case__ :Optional[int] = random_attention_mask([batch_size, 4] )
        snake_case__ :Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
        # Instantiate the two towers from their respective configs.
        snake_case__ :Dict = FlaxViTModel(UpperCamelCase )
        snake_case__ :Tuple = FlaxBertModel(UpperCamelCase )
        return vision_model, text_model

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Combine the per-model testers' configs and inputs into one kwargs dict
        # consumed by the shared check_* helpers.
        snake_case__ :Optional[int] = FlaxViTModelTester(self )
        snake_case__ :Optional[Any] = FlaxBertModelTester(self )
        snake_case__ :Dict = vit_model_tester.prepare_config_and_inputs()
        snake_case__ :int = bert_model_tester.prepare_config_and_inputs()
        snake_case__ , snake_case__ :int = vision_config_and_inputs
        snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class _snake_case ( _A , unittest.TestCase ):
    """CLIP-vision + BERT flavour of the Flax vision-text dual-encoder test suite."""

    def lowerCAmelCase_ ( self ) -> Any:
        # Load a tiny CLIP/BERT dual encoder (both halves converted from PyTorch)
        # and build matching dummy pixel/text inputs.
        snake_case__ :int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip" ,"hf-internal-testing/tiny-bert" ,vision_from_pt=UpperCamelCase ,text_from_pt=UpperCamelCase ,)
        snake_case__ :List[Any] = 13
        snake_case__ :Dict = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        snake_case__ :Union[str, Any] = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
        snake_case__ :Optional[Any] = random_attention_mask([batch_size, 4] )
        snake_case__ :Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
        # Instantiate the two towers from their respective configs.
        snake_case__ :Any = FlaxCLIPVisionModel(UpperCamelCase )
        snake_case__ :Union[str, Any] = FlaxBertModel(UpperCamelCase )
        return vision_model, text_model

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Combine the per-model testers' configs and inputs into one kwargs dict
        # consumed by the shared check_* helpers.
        snake_case__ :Optional[Any] = FlaxCLIPVisionModelTester(self )
        snake_case__ :Optional[int] = FlaxBertModelTester(self )
        snake_case__ :Tuple = clip_model_tester.prepare_config_and_inputs()
        snake_case__ :int = bert_model_tester.prepare_config_and_inputs()
        snake_case__ , snake_case__ :Optional[int] = vision_config_and_inputs
        snake_case__ , snake_case__ , snake_case__ , snake_case__ :Optional[Any] = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class _snake_case ( unittest.TestCase ):
    """Slow integration test against the public clip-italian checkpoint."""

    @slow
    def lowerCAmelCase_ ( self ) -> Dict:
        # End-to-end: run the released checkpoint on a COCO fixture image with
        # two Italian captions and compare logits against reference values.
        snake_case__ :Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" ,logit_scale_init_value=1.0 )
        snake_case__ :Any = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        snake_case__ :Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        snake_case__ :Tuple = processor(
            text=["una foto di un gatto", "una foto di un cane"] ,images=UpperCamelCase ,padding=UpperCamelCase ,return_tensors="np" )
        snake_case__ :List[Any] = model(**UpperCamelCase )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
        # reference logits precomputed with the released checkpoint
        snake_case__ :Dict = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image ,UpperCamelCase ,atol=1E-3 ) )
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


# The PyTorch ControlNet pipelines are only exposed when both `torch` and
# `transformers` are installed; otherwise the dummy placeholder objects are
# imported instead so attribute access raises a helpful error message.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline additionally requires `flax` to be installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
    """
    Speech-to-image pipeline: transcribes audio with Whisper, then uses the
    transcription as the text prompt for a Stable Diffusion generation loop.
    """

    # NOTE(review): every parameter below is literally named `__magic_name__`
    # (duplicate argument names) — this looks like a mangled rename of the real
    # parameter names (speech_model, speech_processor, vae, text_encoder,
    # tokenizer, unet, scheduler, safety_checker, feature_extractor); verify.
    def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
        """Register the Whisper components and Stable Diffusion sub-models."""
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )

        self.register_modules(
            speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )

    def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
        """Enable sliced attention to reduce memory use ("auto" = half the head dim)."""
        if slice_size == "auto":
            __snake_case : str = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__magic_name__ )

    def lowercase__ ( self : str ) -> Any:
        """Disable attention slicing by re-enabling it with a pass-through value."""
        self.enable_attention_slicing(__magic_name__ )

    @torch.no_grad()
    def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
        """
        Run the full pipeline: Whisper transcription -> CLIP text encoding ->
        classifier-free-guided denoising loop -> VAE decode. Returns a
        StableDiffusionPipelineOutput, or the raw image batch if return_dict
        is falsy.
        """
        # 1. Transcribe the raw audio; the first decoded sequence becomes the prompt.
        __snake_case : List[Any] = self.speech_processor.feature_extractor(
            __magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
        __snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )

        __snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
            0
        ]

        # 2. Validate prompt type / image size / callback_steps.
        if isinstance(__magic_name__ , __magic_name__ ):
            __snake_case : Tuple = 1
        elif isinstance(__magic_name__ , __magic_name__ ):
            __snake_case : Optional[int] = len(__magic_name__ )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(__magic_name__ )}.''' )

        # get prompt text embeddings
        __snake_case : Dict = self.tokenizer(
            __magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        __snake_case : Optional[Any] = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            __snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            __snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
        __snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        __snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
        __snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
        __snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        __snake_case : Optional[int] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            __snake_case : List[str]
            if negative_prompt is None:
                __snake_case : Optional[Any] = [""""""] * batch_size
            elif type(__magic_name__ ) is not type(__magic_name__ ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
                    f''' {type(__magic_name__ )}.''' )
            elif isinstance(__magic_name__ , __magic_name__ ):
                __snake_case : Dict = [negative_prompt]
            elif batch_size != len(__magic_name__ ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""" )
            else:
                __snake_case : int = negative_prompt

            __snake_case : List[str] = text_input_ids.shape[-1]
            __snake_case : Any = self.tokenizer(
                __magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
            __snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            __snake_case : Optional[int] = uncond_embeddings.shape[1]
            __snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
            __snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            __snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        __snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        __snake_case : List[Any] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                __snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
                    self.device )
            else:
                __snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            __snake_case : List[str] = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(__magic_name__ )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        __snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        __snake_case : str = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        __snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        __snake_case : List[str] = {}
        if accepts_eta:
            __snake_case : str = eta

        # 3. Denoising loop.
        for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
            # expand the latents if we are doing classifier free guidance
            __snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )

            # predict the noise residual
            __snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample

            # perform guidance
            if do_classifier_free_guidance:
                __snake_case , __snake_case : str = noise_pred.chunk(2 )
                __snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            __snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__magic_name__ , __magic_name__ , __magic_name__ )

        # 4. Scale and decode the latents with the VAE (1/0.18215 is the SD
        # latent scaling factor), then convert to images.
        __snake_case : int = 1 / 0.18215 * latents
        __snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample

        __snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        __snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            __snake_case : Tuple = self.numpy_to_pil(__magic_name__ )

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
| 26 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
# Token ids suppressed at decoding time — presumably the "non-speech" token
# lists for the English-only Whisper tokenizer (punctuation / sound-event
# markers); TODO confirm against upstream Whisper configuration.
lowercase__ : Any = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
    7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
    13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
    46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
    1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
    1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
    3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
# Same idea for the multilingual tokenizer — TODO confirm.
lowercase__ : str = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
    8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
    32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
    72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
    1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
    2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
    4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
    """
    Configuration class for Whisper encoder-decoder speech models: stores the
    architecture hyper-parameters plus SpecAugment fine-tuning settings.
    """

    # NOTE(review): the three class attributes below are all bound to the same
    # name `_snake_case` (upstream Whisper uses `model_type`,
    # `keys_to_ignore_at_inference` and `attribute_map`) — mangled rename; verify.
    _snake_case : Optional[int] = 'whisper'
    _snake_case : Union[str, Any] = ['past_key_values']
    _snake_case : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self : Tuple , lowerCAmelCase__ : List[str]=51865 , lowerCAmelCase__ : Optional[Any]=80 , lowerCAmelCase__ : Optional[int]=6 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Any=6 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : List[Any]=1536 , lowerCAmelCase__ : Dict=1536 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : int=50257 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]="gelu" , lowerCAmelCase__ : Optional[int]=256 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : int=1500 , lowerCAmelCase__ : int=448 , lowerCAmelCase__ : Union[str, Any]=50256 , lowerCAmelCase__ : str=50256 , lowerCAmelCase__ : Optional[int]=50256 , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[int]=[220, 50256] , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : str=256 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Any=0.05 , lowerCAmelCase__ : Optional[int]=10 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : Dict=10 , lowerCAmelCase__ : List[str]=0 , lowerCAmelCase__ : List[Any]=7 , **lowerCAmelCase__ : List[str] , ) -> Any:
        """
        Store the hyper-parameters on the instance and forward the special-token
        ids and suppression settings to the base config class.
        """
        # Core architecture sizes.
        _UpperCamelCase = vocab_size
        _UpperCamelCase = num_mel_bins
        _UpperCamelCase = d_model
        _UpperCamelCase = encoder_layers
        _UpperCamelCase = encoder_attention_heads
        _UpperCamelCase = decoder_layers
        _UpperCamelCase = decoder_attention_heads
        _UpperCamelCase = decoder_ffn_dim
        _UpperCamelCase = encoder_ffn_dim
        # Regularization and initialization.
        _UpperCamelCase = dropout
        _UpperCamelCase = attention_dropout
        _UpperCamelCase = activation_dropout
        _UpperCamelCase = activation_function
        _UpperCamelCase = init_std
        _UpperCamelCase = encoder_layerdrop
        _UpperCamelCase = decoder_layerdrop
        _UpperCamelCase = use_cache
        _UpperCamelCase = encoder_layers
        _UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
        _UpperCamelCase = max_source_positions
        _UpperCamelCase = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        _UpperCamelCase = classifier_proj_size
        _UpperCamelCase = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _UpperCamelCase = apply_spec_augment
        _UpperCamelCase = mask_time_prob
        _UpperCamelCase = mask_time_length
        _UpperCamelCase = mask_time_min_masks
        _UpperCamelCase = mask_feature_prob
        _UpperCamelCase = mask_feature_length
        _UpperCamelCase = mask_feature_min_masks
        _UpperCamelCase = median_filter_width
        super().__init__(
            pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , suppress_tokens=lowerCAmelCase__ , begin_suppress_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
    """ONNX export configuration for Whisper (seq2seq, optional past key values)."""

    @property
    def snake_case__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the exported model inputs."""
        _UpperCamelCase = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        # NOTE(review): assignments below go to `_UpperCamelCase` while
        # `common_inputs` is returned — presumably a mangled rename; verify.
        if self.use_past:
            _UpperCamelCase = {0: '''batch'''}
        else:
            _UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}

        if self.use_past:
            self.fill_with_past_key_values_(lowerCAmelCase__ , direction='''inputs''' )

        return common_inputs

    def snake_case__ ( self : Dict , lowerCAmelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional["TensorType"] = None , lowerCAmelCase__ : int = 22050 , lowerCAmelCase__ : float = 5.0 , lowerCAmelCase__ : int = 220 , ) -> Mapping[str, Any]:
        """
        Build dummy audio features (via the feature extractor) and decoder ids
        (via the tokenizer) to trace the model for ONNX export.
        """
        _UpperCamelCase = OrderedDict()
        _UpperCamelCase = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCAmelCase__ , framework=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , time_duration=lowerCAmelCase__ , frequency=lowerCAmelCase__ , )
        _UpperCamelCase = encoder_inputs['''input_features'''].shape[2]
        _UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
        _UpperCamelCase = super().generate_dummy_inputs(
            preprocessor.tokenizer , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        _UpperCamelCase = encoder_inputs.pop('''input_features''' )
        _UpperCamelCase = decoder_inputs.pop('''decoder_input_ids''' )

        if "past_key_values" in decoder_inputs:
            _UpperCamelCase = decoder_inputs.pop('''past_key_values''' )

        return dummy_inputs

    @property
    def snake_case__ ( self : List[Any] ) -> float:
        """Absolute tolerance used when validating the exported ONNX model."""
        return 1e-3
| 98 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
lowerCAmelCase__: List[Any] = logging.get_logger(__name__)
lowerCAmelCase__: Tuple = """Hello world! cécé herlolip"""
def __SCREAMING_SNAKE_CASE ( roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ) -> Union[str, Any]:
    """
    Convert a fairseq XLM-RoBERTa-XL checkpoint into a Transformers checkpoint.

    Args:
        roberta_checkpoint_path: path to the official fairseq checkpoint directory.
        pytorch_dump_folder_path: directory where the converted model is saved.
        classification_head: if True, convert the "mnli" classification head;
            otherwise convert the masked-LM head.

    Raises:
        Exception: if the converted model's outputs do not match fairseq's.
    """
    # NOTE: the original signature declared the same name for all three
    # parameters (a SyntaxError); they are restored to meaningful names here.
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,  # PyTorch default used in fairseq
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq fc1)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output (fairseq fc2) — the original copied the same `fca` tensor into
        # both intermediate and output, which is wrong and fails the shape assert.
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    # `lowerCAmelCase__` is the module-level sample sentence defined above.
    input_ids: torch.Tensor = roberta.encode(lowerCAmelCase__).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    # The conversion routine defined in this file is `__SCREAMING_SNAKE_CASE`;
    # the original referenced undefined names (`parser`, `args`) and called a
    # nonexistent `convert_xlm_roberta_xl_checkpoint_to_pytorch`.
    __SCREAMING_SNAKE_CASE(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 716 |
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# Mapping of submodule name -> public names, consumed by _LazyModule below.
# The original assigned every structure to a throwaway variable and then
# referenced an undefined `_import_structure`, raising a NameError on import.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

# Image-processing classes require the vision extra (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | 0 |
# First cell injected into every generated doc notebook: installs dependencies.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# Cells prepended to generated notebooks.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholders substituted before running black on doc code samples.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Submodule name -> public symbols, consumed lazily by _LazyModule below.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # BartphoTokenizer needs sentencepiece; omit it when unavailable.
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    # Tiny SentencePiece model shipped with the test fixtures.
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# Vocabulary ids of the "en" / "fr" language-code tokens used by the tests below.
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Common tokenizer checks for MaMaaa (M2M100), driven by TokenizerTesterMixin."""

    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False
    test_sentencepiece = True

    def setUp(self):
        """Write a tiny vocab + spm model into tmpdirname and round-trip the tokenizer."""
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # Input text and its expected detokenization are identical for this vocab.
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        """Pin the encoding of a fixed text batch against the hosted 418M checkpoint."""
        # fmt: off
        expected_encoding = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    """Integration tests against the hosted facebook/m2m100_418M tokenizer."""

    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        # One shared tokenizer for the whole class; loading it is the slow part.
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        # The leading language-code token must not survive decoding.
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    """Pin the token ids produced by the 1b- and 5b-lyrics Jukebox tokenizers for one fixed metadata example."""

    tokenizer_class = JukeboxTokenizer
    # Shared artist/genre/lyrics input for both checkpoints.
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]]),
            torch.tensor([[0, 0, 0, 1069, 11]]),
            torch.tensor([[0, 0, 0, 1069, 11]]),
        ]
        # fmt: on

        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]]),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
        ]
        # fmt: on

        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    """Builds tiny TransfoXL configs/inputs and checks model outputs for the test case below."""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        # Attention key length = current tokens + cached memory tokens.
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        # Make weight init and any sampling deterministic across runs.
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        # Second call feeds the cached memories back in.
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        lm_logits_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model/pipeline test suite for the TF TransfoXL family."""

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Only the sequence-classification head exposes output embeddings here.
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test: greedy generation against a known-good token sequence."""

    @unittest.skip('Skip test until #12651 is resolved.' )
    @slow
    def snake_case_ ( self ) -> int:
        """Generate 200 tokens from the pretrained wt103 checkpoint and compare them
        token-for-token with the reference continuation.

        Bug fixes: the input tensor, expected ids and `do_sample` flag were all
        referenced through undefined placeholder names, and the dtype was the
        nonexistent `tf.intaa` (should be `tf.int32`).
        """
        model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32 )  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids )
| 40 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
# Empty placeholder suite: the decorator ensures it is collected/run only when
# onnxruntime is installed.
@require_onnxruntime
class a_ :
    pass
| 350 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Bug fix: this logger is used as `logger` throughout `main()`, and the config-class
# list is referenced as `MODEL_CONFIG_CLASSES` below, but both were bound to mangled
# placeholder names; restore the canonical names.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Bug fix: every field previously shared a single mangled name (so only the last
    survived) with undefined defaults; field names are restored to match the attributes
    read in ``__post_init__`` and by ``main()``.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` mapping consumed by `datasets.load_dataset`.
        # Bug fix: the mangled version repeatedly overwrote one local instead of
        # filling a dict and never assigned the attribute on `self`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.

    Bug fix: every field previously shared a single mangled name with undefined
    defaults; field names are restored to match the ``model_args`` attributes read
    in ``main()``.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "If training from scratch, pass a model type from the list: "
                + ", ".join(conf.model_type for conf in MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
            )
        },
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """
    Generate random boolean masks for SimMIM pretraining.

    Masking is drawn at `mask_patch_size` granularity, then expanded to
    `model_patch_size` granularity; calling the instance returns a flattened
    0/1 tensor with `ceil(token_count * mask_ratio)` masked patches.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size" )

        # Grid of coarse mask patches, and how far each expands into model patches.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__(self):
        # Pick `mask_count` coarse patches uniformly at random.
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        # Bug fix: dtype was an undefined mangled name; an integer 0/1 mask is intended.
        mask = np.zeros(self.token_count, dtype=int )
        mask[mask_idx] = 1

        # Expand each coarse patch to the model's patch resolution.
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale, axis=0 ).repeat(self.scale, axis=1 )

        return torch.tensor(mask.flatten() )
def collate_fn(examples):
    """Stack per-example pixel values and masks into the batch dict the model expects.

    Bug fix: both stacked tensors were bound to the same mangled local while the
    return statement referenced undefined names; results are now named explicitly.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    mask = torch.stack([example["mask"] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    """Entry point for masked-image-modeling pretraining: parse arguments, build the
    dataset/config/image processor/model, then train and evaluate with `Trainer`.

    Bug fix: the mangled version bound every result to placeholder names and passed
    undefined names (`_SCREAMING_SNAKE_CASE`, etc.) around; consistent names restored.
    """
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float ) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split )
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(f'New config: {config}' )

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type" ):
        config.decoder_type = "simmim"

    # adapt config: fall back to the configuration's values when not given on the CLI
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        } )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch" )
        model = AutoModelForMaskedImageModeling.from_config(config )

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std ),
        ] )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )

    def preprocess_images(examples):
        """Apply the train transforms and attach one freshly-drawn mask per image."""
        examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics )
        trainer.save_metrics("train", train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics )
        trainer.save_metrics("eval", metrics )

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )


if __name__ == "__main__":
    main()
| 707 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """`TaskTemplate` describing an image-classification task: an `image` input column
    mapped onto a `labels` ClassLabel column.

    Bug fix: the decorator/base class used an undefined mangled name, all fields
    shared one name, and the alignment method's parameter did not match its body.
    """

    # Serialized even at its default so consumers can dispatch on the task name.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's own ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ rather than setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 459 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase_ ( TestCase ):
    """Static quality checks over every dataset script under ./datasets.

    Bug fix: the base class was an undefined mangled name and all four methods were
    named `A`, so the helper names the test methods call never existed.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a match when a (non-binary) `open(...)` call lacks an explicit encoding."""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real `print(` call (ignoring prints inside comments/strings)."""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('''./datasets''' )
        if not dataset_paths.exists():
            # robustness: nothing to check when run outside the repository root
            return
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}' )

    def test_no_print_statements(self):
        dataset_paths = Path('''./datasets''' )
        if not dataset_paths.exists():
            # robustness: nothing to check when run outside the repository root
            return
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.' )
| 639 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the llama subpackage. Bug fix: every optional-dependency
# branch bound its export list to a throwaway name instead of extending
# `_import_structure`, and the `_LazyModule` was never installed in `sys.modules`.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 639 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial divisor of ``num`` using Pollard's rho, or ``None``.

    Bug fix: the function was defined under a mangled name while the CLI block
    below calls ``pollard_rho``; the canonical name is restored.

    Raises:
        ValueError: if ``num`` is less than 2.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2" )

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard suggested ``f(x) = (x**2 - 1) % num``; we use ``(x**2 + C) % num``
    # where ``C`` can be varied between attempts to make retries easy.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        """Pseudorandom successor function for the cycle walk."""
        return (pow(value, 2 ) + step) % modulus

    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num )
            hare = rand_fn(hare, step, num )
            hare = rand_fn(hare, step, num )

            # Once both walkers are inside a cycle whose length divides ``num``,
            # their position difference shares a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num )
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # This attempt failed: reseed from the hare's position (Brent's variant)
        # and perturb the random function, keeping everything deterministic.
        seed = hare
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"""{args.num} is probably prime""")
    else:
        quotient = args.num // divisor
        print(f"""{args.num} = {divisor} * {quotient}""")
| 595 | """simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map each EfficientNet variant to its Keras model class.
# Bug fix: the mangled version rebound a single `__A` name three times (so
# `CONFIG_MAP`, used below, was undefined) and pointed every variant at the
# nonexistent `EfficientNetBa`; restore distinct names and the b0..b7 classes.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyper-parameters used when building the HF config.
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1_280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1_280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1_408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1_536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1_792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2_048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2_304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2_560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name):
    """Build an `EfficientNetConfig` for the given variant (b0..b7) with ImageNet-1k labels.

    Bug fix: every assignment was bound to a throwaway mangled name, so the config
    was never populated and `id2label` was undefined at use.
    NOTE(review): attribute names restored from EfficientNetConfig's documented
    fields — confirm against the original conversion script.
    """
    config = EfficientNetConfig()

    # Architecture hyper-parameters for this variant.
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    # Attach the standard ImageNet-1k label mapping from the hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion.

    Bug fix: `stream` was an undefined mangled name (should be True so the raw
    response body can be handed to PIL) and the URL/image locals were mangled.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
def convert_image_processor(model_name):
    """Create the `EfficientNetImageProcessor` matching this variant's input resolution.

    Bug fix: the resolved size was bound to a mangled name and `do_center_crop`
    was an undefined placeholder (the conversion resizes to the exact square
    input, so center-cropping is disabled).
    """
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """Build the TF -> HuggingFace parameter-name mapping for EfficientNet.

    Args:
        original_param_names: names of the TF variables (``.../kernel:0`` style).

    Returns:
        dict mapping each matching TF variable name to its HF ``state_dict`` key.
    """
    # TF blocks are named "block{Na}"; collect the "{Na}" suffixes and assign
    # each a sequential HF block index.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var"))
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"))
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"))
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"))
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight"))
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var"))

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    # Keep only the names actually present in the source checkpoint, prefixed
    # with the HF backbone attribute name.
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    # The classification head lives outside the "efficientnet." backbone prefix.
    # NOTE(review): "predictions/..." names follow the upstream script — confirm.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF weights (numpy arrays) into the HF state dict, in place.

    Args:
        hf_params: HF ``state_dict`` (name -> torch tensor), mutated in place.
        tf_params: TF variables as name -> numpy array.
        key_mapping: TF name -> HF name, as produced by ``rename_keys``.
    """
    for key, value in tf_params.items():
        # Normalization statistics are handled by the renamed bn entries;
        # skip any leftover "normalization" variables.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernels: TF HWIO -> torch OIHW.
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # Depthwise kernels: TF HWIO with multiplier -> torch layout.
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # Dense kernels: TF (in, out) -> torch (out, in).
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters.
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert an original Keras EfficientNet checkpoint to a HF PyTorch model.

    Args:
        model_name: variant key ("b0" .. "b7") into CONFIG_MAP / model_classes.
        pytorch_dump_folder_path: directory for the converted model/processor.
        save_model: if True, save the converted artifacts locally.
        push_to_hub: if True, push model and image processor to the hub.

    NOTE(review): the obfuscated source named this function and its helpers
    ``lowercase``; this restores the name the __main__ block calls.
    """
    # Instantiate the original Keras model with its ImageNet head.
    # NOTE(review): the obfuscated source passed an undefined name for several
    # of these keywords; True/None below follow the upstream script — confirm.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    # Collect every TF parameter (trainable and not) as name -> numpy array.
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model.
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary and copy weights over.
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image.
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference.
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference.
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose.
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model.
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor.
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub.
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 595 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """Write a small well-formed two-column CSV file and return its path."""
    # tmp_path is pytest's built-in per-test temporary directory fixture.
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20\n ")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """Write a CSV whose last row has an extra column (malformed) and return its path."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20,\n ")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """Write a single-column CSV whose one row is the path of `image_file`."""
    # NOTE(review): `image_file` is expected to be a fixture defined elsewhere
    # in the test suite — confirm it is available where this file is collected.
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\\n image\n {image_file}\n """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """Write a single-column CSV of class-label strings and return its path."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        "\\n label\n good\n bad\n good\n ")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """Write a single-column CSV of space-separated integer lists and return its path."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        "\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """Csv._generate_tables must raise on a malformed file and log which file failed."""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    # NOTE(review): the obfuscated source lost the expected exception type;
    # ValueError matches the "Error tokenizing data" pandas parser error — confirm.
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    # The failing file's basename must appear in an ERROR-level log record.
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records)
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """A string column declared as Image() is decoded to the image feature type."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        # Second line of the CSV is the image path written by the fixture.
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Image()() yields the underlying arrow storage type of the feature.
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """A string column declared as ClassLabel is encoded to integer class ids."""
    with open(csv_file_with_label, encoding="utf-8") as f:
        # Skip the header; the remaining lines are the label strings.
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # ClassLabel(...)() yields the underlying arrow storage type of the feature.
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A converter can turn a space-separated string column into lists of ints."""
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 78 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-resolved import structure: submodule name -> list of public symbols.
# Each optional-backend block adds its submodule only when the backend is
# importable; `_LazyModule` (consumed at the bottom) defers the real imports.
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy that imports the
    # submodules above on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 166 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic kernels/RNG so the numeric slice assertions below are reproducible.
enable_full_determinism()
class A__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Fast CPU tests for ``AltDiffusionPipeline`` built from tiny dummy components.

    NOTE(review): this class is non-functional as obfuscated — the three
    ``__UpperCAmelCase`` base names are undefined (the imports suggest the
    pipeline tester mixins), all class attributes collapse onto ``__A``,
    every method is named ``__lowercase`` (later defs overwrite earlier ones),
    several methods reference an undefined ``lowercase``/``unet``/``scheduler``
    name, and one signature duplicates a parameter name. Code preserved
    byte-for-byte pending reconstruction from the upstream diffusers test.
    """
    __A : Dict = AltDiffusionPipeline
    __A : int = TEXT_TO_IMAGE_PARAMS
    __A : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    __A : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
    __A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS

    def __lowercase ( self) -> Any:
        """Build the tiny dummy components dict used by the fast tests."""
        torch.manual_seed(0)
        a__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        a__ : Any = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
        torch.manual_seed(0)
        a__ : Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        a__ : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        a__ : Optional[Any] = CLIPTextModel(lowercase)
        a__ : Tuple = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        a__ : List[str] = 77
        # NOTE(review): the dict values below reference names (unet, scheduler,
        # vae, text_encoder, tokenizer) that the obfuscation collapsed into a__.
        a__ : Any = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def __lowercase ( self , lowercase , lowercase=0) -> List[str]:
        """Build deterministic pipeline call kwargs for the given device/seed."""
        if str(lowercase).startswith('mps'):
            a__ : Optional[Any] = torch.manual_seed(lowercase)
        else:
            a__ : List[str] = torch.Generator(device=lowercase).manual_seed(lowercase)
        a__ : Optional[int] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def __lowercase ( self) -> List[Any]:
        """Attention-slicing parity check with a relaxed tolerance."""
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def __lowercase ( self) -> int:
        """Batched-vs-single inference parity check with a relaxed tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def __lowercase ( self) -> int:
        """End-to-end run with a Roberta-series text encoder; checks a pixel slice."""
        a__ : Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        a__ : Tuple = self.get_dummy_components()
        torch.manual_seed(0)
        a__ : Optional[int] = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        a__ : Dict = RobertaSeriesModelWithTransformation(lowercase)
        a__ : str = text_encoder
        a__ : str = AltDiffusionPipeline(**lowercase)
        a__ : Optional[Any] = alt_pipe.to(lowercase)
        alt_pipe.set_progress_bar_config(disable=lowercase)
        a__ : str = self.get_dummy_inputs(lowercase)
        a__ : Optional[Any] = 'A photo of an astronaut'
        a__ : Any = alt_pipe(**lowercase)
        a__ : Optional[int] = output.images
        a__ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a__ : List[Any] = np.array(
            [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def __lowercase ( self) -> List[Any]:
        """Same end-to-end run but with a PNDM scheduler; checks a pixel slice."""
        a__ : Any = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        a__ : Union[str, Any] = self.get_dummy_components()
        a__ : List[Any] = PNDMScheduler(skip_prk_steps=lowercase)
        torch.manual_seed(0)
        a__ : Union[str, Any] = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        a__ : Optional[int] = RobertaSeriesModelWithTransformation(lowercase)
        a__ : str = text_encoder
        a__ : Optional[Any] = AltDiffusionPipeline(**lowercase)
        a__ : Dict = alt_pipe.to(lowercase)
        alt_pipe.set_progress_bar_config(disable=lowercase)
        a__ : str = self.get_dummy_inputs(lowercase)
        a__ : List[str] = alt_pipe(**lowercase)
        a__ : List[Any] = output.images
        a__ : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a__ : Any = np.array(
            [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration tests for the full ``BAAI/AltDiffusion`` checkpoint.

    NOTE(review): obfuscation artifacts — every method is named ``__lowercase``
    (so only the last definition survives on the class) and the pipeline
    keyword values reference the undefined name ``lowercase``. Code preserved
    byte-for-byte.
    """
    def __lowercase ( self) -> Optional[int]:
        """Free GPU memory between tests (intended as tearDown)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowercase ( self) -> Tuple:
        """Full 512x512 generation with the default scheduler; checks a pixel slice."""
        a__ : List[str] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowercase)
        a__ : Any = alt_pipe.to(lowercase)
        alt_pipe.set_progress_bar_config(disable=lowercase)
        a__ : str = 'A painting of a squirrel eating a burger'
        a__ : Optional[Any] = torch.manual_seed(0)
        a__ : List[Any] = alt_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=20 , output_type='np')
        a__ : Dict = output.images
        a__ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        a__ : Tuple = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def __lowercase ( self) -> Optional[int]:
        """Generation with an explicit DDIM scheduler and 2 steps; checks a pixel slice."""
        a__ : Union[str, Any] = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler')
        a__ : Optional[int] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowercase , safety_checker=lowercase)
        a__ : Dict = alt_pipe.to(lowercase)
        alt_pipe.set_progress_bar_config(disable=lowercase)
        a__ : Any = 'A painting of a squirrel eating a burger'
        a__ : Dict = torch.manual_seed(0)
        a__ : str = alt_pipe([prompt] , generator=lowercase , num_inference_steps=2 , output_type='numpy')
        a__ : int = output.images
        a__ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        a__ : Any = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 716 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A__ :
    """Helper that builds tiny DPR configs/inputs and checks model output shapes
    (plays the role of ``TFDPRModelTester`` referenced below).

    NOTE(review): non-functional as obfuscated — ``__init__`` repeats the
    parameter name ``lowercase`` for every argument (a SyntaxError), the body
    reads attribute names (``parent``, ``batch_size``, ...) that were collapsed
    into ``a__``, and all other methods share the name ``__lowercase`` so only
    the last definition survives. Code preserved byte-for-byte.
    """
    def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=0 , ) -> Dict:
        """Record the hyper-parameters used to build dummy configs/inputs."""
        a__ : str = parent
        a__ : int = batch_size
        a__ : Optional[int] = seq_length
        a__ : Any = is_training
        a__ : List[Any] = use_input_mask
        a__ : Dict = use_token_type_ids
        a__ : str = use_labels
        a__ : List[Any] = vocab_size
        a__ : List[str] = hidden_size
        a__ : int = num_hidden_layers
        a__ : Any = num_attention_heads
        a__ : List[str] = intermediate_size
        a__ : Union[str, Any] = hidden_act
        a__ : str = hidden_dropout_prob
        a__ : Tuple = attention_probs_dropout_prob
        a__ : List[Any] = max_position_embeddings
        a__ : List[str] = type_vocab_size
        a__ : Union[str, Any] = type_sequence_label_size
        a__ : Optional[int] = initializer_range
        a__ : Any = num_labels
        a__ : List[Any] = num_choices
        a__ : Optional[int] = scope
        a__ : Tuple = projection_dim

    def __lowercase ( self) -> int:
        """Build a dummy DPRConfig plus random input tensors/labels."""
        a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a__ : List[str] = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            a__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
        a__ : Tuple = None
        if self.use_token_type_ids:
            a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        a__ : Tuple = None
        a__ : List[Any] = None
        a__ : Tuple = None
        if self.use_labels:
            a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            a__ : List[str] = ids_tensor([self.batch_size] , self.num_choices)
        a__ : List[Any] = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
        a__ : List[str] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
        """Check TFDPRContextEncoder pooled-output shape for several input combos."""
        a__ : Any = TFDPRContextEncoder(config=lowercase)
        a__ : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase)
        a__ : Union[str, Any] = model(lowercase , token_type_ids=lowercase)
        a__ : Dict = model(lowercase)
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))

    def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]:
        """Check TFDPRQuestionEncoder pooled-output shape for several input combos."""
        a__ : str = TFDPRQuestionEncoder(config=lowercase)
        a__ : Union[str, Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase)
        a__ : Optional[Any] = model(lowercase , token_type_ids=lowercase)
        a__ : str = model(lowercase)
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))

    def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]:
        """Check TFDPRReader start/end/relevance logit shapes."""
        a__ : Dict = TFDPRReader(config=lowercase)
        a__ : Tuple = model(lowercase , attention_mask=lowercase)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))

    def __lowercase ( self) -> Tuple:
        """Split prepare_config_and_inputs output into (config, inputs_dict)."""
        a__ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Tuple = config_and_inputs
        a__ : List[str] = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common-model-test suite for the TF DPR encoders/reader.

    NOTE(review): non-functional as obfuscated — the two ``__UpperCAmelCase``
    bases are undefined (imports suggest ``TFModelTesterMixin`` and
    ``PipelineTesterMixin``), ``TFDPRModelTester`` referenced below is not
    defined under that name in this file, class attributes collapse onto
    ``__A``, and all test methods share the name ``__lowercase``. Code
    preserved byte-for-byte.
    """
    __A : Dict = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    __A : Tuple = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
    __A : List[str] = False
    __A : Any = False
    __A : Optional[Any] = False
    __A : Union[str, Any] = False
    __A : List[Any] = False

    def __lowercase ( self) -> str:
        """Set up the model tester and a config tester (intended as setUp)."""
        a__ : Optional[int] = TFDPRModelTester(self)
        a__ : Tuple = ConfigTester(self , config_class=lowercase , hidden_size=37)

    def __lowercase ( self) -> List[str]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def __lowercase ( self) -> Union[str, Any]:
        """Exercise the context-encoder shape checks."""
        a__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*lowercase)

    def __lowercase ( self) -> Optional[Any]:
        """Exercise the question-encoder shape checks."""
        a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*lowercase)

    def __lowercase ( self) -> Union[str, Any]:
        """Exercise the reader shape checks."""
        a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*lowercase)

    @slow
    def __lowercase ( self) -> Union[str, Any]:
        """Smoke-test from_pretrained for each published DPR checkpoint family."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : Optional[Any] = TFDPRContextEncoder.from_pretrained(lowercase)
            self.assertIsNotNone(lowercase)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : Optional[Any] = TFDPRContextEncoder.from_pretrained(lowercase)
            self.assertIsNotNone(lowercase)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : int = TFDPRQuestionEncoder.from_pretrained(lowercase)
            self.assertIsNotNone(lowercase)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : str = TFDPRReader.from_pretrained(lowercase)
            self.assertIsNotNone(lowercase)
@require_tf
class A__ ( unittest.TestCase ):
    """Slow integration test comparing a real DPR question-encoder embedding
    against a reference slice.

    NOTE(review): the keyword value ``lowercase`` inside the method is an
    undefined name left by obfuscation. Code preserved byte-for-byte.
    """
    @slow
    def __lowercase ( self) -> int:
        """Embed one tokenized question and compare the first 10 dims to a
        hard-coded reference."""
        a__ : Any = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        a__ : Tuple = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        a__ : List[str] = model(lowercase)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        a__ : int = tf.constant(
            [
                [
                    0.03_23_62_53,
                    0.12_75_33_35,
                    0.16_81_85_09,
                    0.00_27_97_86,
                    0.3_89_69_33,
                    0.24_26_49_45,
                    0.2_17_89_71,
                    -0.02_33_52_27,
                    -0.08_48_19_59,
                    -0.14_32_41_17,
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 392 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — must be named ``logger``: the warning in the config class
# below references it by that name.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file URL.
_snake_case = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class a(PretrainedConfig):
    """Configuration for the ALIGN text tower (a BERT-style text encoder).

    Defaults mirror a standard BERT-base configuration.

    NOTE(review): the obfuscated source inherited from the undefined name
    ``_lowerCAmelCase``; ``PretrainedConfig`` (imported at the top of this
    module) is the evident intended base. The classmethod below follows the
    standard ``from_pretrained`` override pattern — confirm the intended name.
    """

    # Consumed by the PretrainedConfig machinery (see the hasattr check below).
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config; unwraps the ``text_config`` of a composite ALIGN config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(_lowerCAmelCase):
    """EfficientNet-style vision-tower configuration for ALIGN.

    NOTE(review): the base class name ``_lowerCAmelCase`` is mangled —
    presumably ``PretrainedConfig``; confirm against the file's imports.
    The class itself is renamed from the colliding mangled name ``a`` to
    the name its callers use (see ``AlignVisionConfig(**...)`` below).
    """

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each inverted-residual block expands into 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping a composite ``align``
        config's ``vision_config`` sub-dict when necessary."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(_lowerCAmelCase):
    """Composite ALIGN configuration holding a text and a vision sub-config.

    NOTE(review): the base class name ``_lowerCAmelCase`` is mangled —
    presumably ``PretrainedConfig``. The second class attribute was a
    mangled name set to ``True``; presumed ``is_composition`` — confirm.
    """

    model_type = "align"
    is_composition = True  # NOTE(review): mangled attribute name — confirm

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: "AlignTextConfig", vision_config: "AlignVisionConfig", **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 81 | '''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase__ : str = get_tests_dir('fixtures')
class _lowercase ( unittest.TestCase ):
    '''Loading-utility tests for image processors: offline-cache fallback,
    loading from a raw config URL, and loading from a repo subfolder.

    NOTE(review): identifiers in this class look machine-mangled — results
    are bound to ``UpperCAmelCase__`` but later read back as
    ``lowerCamelCase_`` (undefined at runtime). The original local names
    must be restored before these tests can actually run.
    '''
    def lowerCAmelCase__ ( self ) -> int:
        '''Cached files are used when the Hub responds with HTTP 500.'''
        # Build a fake HTTP-500 response so from_pretrained must fall back
        # to the local cache instead of the network.
        UpperCAmelCase__ : Optional[int] = mock.Mock()
        UpperCAmelCase__ : Dict = 500
        UpperCAmelCase__ : Optional[int] = {}
        UpperCAmelCase__ : List[str] = HTTPError
        UpperCAmelCase__ : Tuple = {}
        # Download this model to make sure it's in the cache.
        UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' ,return_value=lowerCamelCase_ ) as mock_head:
            UpperCAmelCase__ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def lowerCAmelCase__ ( self ) -> Tuple:
        '''An image processor can be loaded directly from a config-file URL.'''
        UpperCAmelCase__ : Dict = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
    def lowerCAmelCase__ ( self ) -> Union[str, Any]:
        '''Loading from a repo subfolder requires the explicit ``subfolder`` argument.'''
        with self.assertRaises(lowerCamelCase_ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            UpperCAmelCase__ : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
        UpperCAmelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''' ,subfolder='''feature_extractor''' )
        self.assertIsNotNone(lowerCamelCase_ )
@is_staging_test
class _lowercase ( unittest.TestCase ):
    '''Staging-Hub round-trip tests: push an image processor, reload it,
    and verify the reloaded state matches, for a user repo, an org repo,
    and a dynamic (custom-code) processor.

    NOTE(review): identifiers look machine-mangled — results are bound to
    ``UpperCAmelCase__`` but later read back as ``lowerCamelCase_``
    (undefined at runtime); restore the original names before relying on
    these tests.
    '''
    @classmethod
    def lowerCAmelCase__ ( cls ) -> Optional[Any]:
        '''Store the staging token so pushes in this class are authenticated.'''
        UpperCAmelCase__ : Optional[int] = TOKEN
        HfFolder.save_token(lowerCamelCase_ )
    @classmethod
    def lowerCAmelCase__ ( cls ) -> List[str]:
        '''Best-effort cleanup of the repos created by the tests below.'''
        try:
            delete_repo(token=cls._token ,repo_id='''test-image-processor''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id='''valid_org/test-image-processor-org''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id='''test-dynamic-image-processor''' )
        except HTTPError:
            pass
    def lowerCAmelCase__ ( self ) -> Union[str, Any]:
        '''push_to_hub and save_pretrained(push_to_hub=True) round-trip to a user repo.'''
        UpperCAmelCase__ : Optional[Any] = ViTImageProcessor.from_pretrained(lowerCamelCase_ )
        image_processor.push_to_hub('''test-image-processor''' ,use_auth_token=self._token )
        UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        # Reloaded processor must match attribute-for-attribute.
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='''test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowerCamelCase_ ,repo_id='''test-image-processor''' ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
        UpperCAmelCase__ : List[Any] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) )
    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''Same round-trip as above, but targeting an organization repo.'''
        UpperCAmelCase__ : Union[str, Any] = ViTImageProcessor.from_pretrained(lowerCamelCase_ )
        image_processor.push_to_hub('''valid_org/test-image-processor''' ,use_auth_token=self._token )
        UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='''valid_org/test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowerCamelCase_ ,repo_id='''valid_org/test-image-processor-org''' ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
        UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) )
    def lowerCAmelCase__ ( self ) -> str:
        '''A custom (dynamic) image processor keeps its auto_map and reloads
        via trust_remote_code.'''
        CustomImageProcessor.register_for_auto_class()
        UpperCAmelCase__ : List[str] = CustomImageProcessor.from_pretrained(lowerCamelCase_ )
        image_processor.push_to_hub('''test-dynamic-image-processor''' ,use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map ,{'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} ,)
        UpperCAmelCase__ : Any = AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''' ,trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ ,'''CustomImageProcessor''' )
| 614 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester that additionally checks MobileNetV1-specific fields.

    Renamed from the mangled ``_SCREAMING_SNAKE_CASE`` to the name the
    setUp call site already uses; ``A_`` references restored to the local
    ``config``.
    """

    def create_and_test_config_common_properties(self):
        # MobileNetV1 has no text modality; just verify the vision-specific
        # attributes exist on a freshly built config.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds small MobileNetV1 configs/inputs and runs shape checks.

    Renamed from the mangled class name to the one the setUp call site
    uses; duplicate ``_SCREAMING_SNAKE_CASE`` parameters restored to the
    names the attribute assignments reference.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The effective hidden size is scaled by the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when enabled) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Base model output must be (batch, hidden, H/stride, W/stride)."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Classification head must emit (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileNetV1.

    MobileNetV1 takes pixel values only (no inputs_embeds), exposes no
    attentions, and does not support pruning, embedding resizing, or head
    masking, so the corresponding common tests are disabled or skipped.
    Mangled ``A_`` references restored to the locals each method builds.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        """forward() must take ``pixel_values`` as its first argument."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            # MobileNetV1 exposes 26 intermediate feature maps.
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test.

    Renamed from the mangled ``__UpperCamelCase`` to the name the
    integration test below already calls.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released google/mobilenet_v1_1.0_224
    checkpoint; mangled ``A_`` references restored."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits (1000 ImageNet classes + background)
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 716 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a flat/row array into a single column of shape (size, 1).

    Renamed from the mangled ``__UpperCamelCase`` to the name its callers
    in this file already use.
    """
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Average within-class covariance of *features*.

    ``features`` is (n_features, n_samples); ``labels`` assigns each
    column a class id in ``range(classes)``. The mangled source never
    bound ``data``/``covariance_sum``; this restores the intended flow.
    The column reshape is inlined so the function is self-contained.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - data_mean.reshape((data_mean.size, 1))
        if i > 0:
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # First loop: initialize the accumulator
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class covariance: class-size-weighted outer products of
    (class mean - global mean), averaged over all samples.

    The mangled source never bound ``general_data_mean``/``data``/
    ``covariance_sum``; this restores the intended flow with the column
    reshape inlined.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        class_size = data.shape[1]
        data_mean = data.mean(1)
        mean_diff = data_mean.reshape((data_mean.size, 1)) - general_data_mean.reshape((general_data_mean.size, 1))
        if i > 0:
            covariance_sum += class_size * np.dot(mean_diff, mean_diff.T)
        else:
            # First loop: initialize the accumulator
            covariance_sum = class_size * np.dot(mean_diff, mean_diff.T)
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* (n_features x n_samples) onto the top
    *dimensions* principal components.

    Returns the projection of shape (dimensions, n_samples).
    Raises AssertionError when *features* contains no nonzero entry.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project *features* onto *dimensions* linear discriminants via the
    generalized eigenproblem S_b v = lambda S_w v.

    Fixes two defects from the mangled source: ``if features.any:``
    tested the bound method object (always truthy) instead of calling it,
    and the eigen/SVD results were never bound to the names they were
    read back from.

    Raises AssertionError when ``classes <= dimensions`` or when
    *features* contains no nonzero entry.
    """
    # Dimensionality reduction requires strictly fewer target dimensions
    # than classes.
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the selected directions via SVD before projecting.
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes.

    Restores the argument names the mangled source replaced with the
    undefined ``_A``.
    """
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA output that deviates from the expected projection must surface
    as an AssertionError.

    Restores the argument names the mangled source replaced with the
    undefined ``_A``.
    """
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
# Run any doctests embedded in this module when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Map original DINO ViT checkpoint keys to HF ViT parameter names.

    Returns a list of ``(old_key, new_key)`` tuples. With
    ``base_model=True`` the classification head is omitted and the
    leading ``"vit."`` prefix is stripped. The mangled source assigned
    the list to ``a__`` but appended to the undefined ``rename_keys``;
    this restores the intended binding and the callers' function name.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused ``qkv`` matrix in *state_dict* into separate
    query/key/value entries under HF ViT key names, in place.

    The mangled source popped the fused tensors into a dummy name and
    then read the undefined ``in_proj_weight``/``in_proj_bias``, and the
    per-projection writes back into ``state_dict`` were lost entirely;
    this restores the intended behavior.
    """
    for i in range(config.num_hidden_layers):
        # Base-model checkpoints have no "vit." prefix on encoder keys.
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the DINO linear-head weights in place; the HF base model has
    no classification head. Missing keys are ignored."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # pop with a default so absent keys do not raise
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    The mangled source popped the value into a dummy name and never
    stored it under the new key; this restores the intended write.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats verification image.

    The mangled source passed ``stream=UpperCamelCase`` (undefined at
    module level); ``stream=True`` is required so ``response.raw`` can be
    handed to ``PIL.Image.open``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak a DINO torch-hub checkpoint into HF ViT format,
    verify outputs against the original model, and save the result.

    The mangled source bound every intermediate to ``a__`` while reading
    back the original names (``config``, ``state_dict``, ``model``, ...);
    this restores those bindings and the function name the CLI entry
    point calls.
    """
    config = ViTConfig()
    # patch_size: names ending in "8" use 8x8 patches instead of 16x16
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture: the "small" variants shrink the encoder
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The mangled source bound the parser and the parsed
    # args to the dummy name `a` while reading them back as
    # `parser`/`args` (NameError); this restores the intended names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 273 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowercase:
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_mask=True,
    use_token_type_ids=False,
    use_labels=True,
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    scope=None,
):
    """Store the small-model hyperparameters used by the Llama tester.

    The mangled source declared every parameter as the same name
    (``__SCREAMING_SNAKE_CASE`` — a duplicate-argument SyntaxError) and
    assigned each attribute to ``a__``; this restores the names the
    right-hand sides reference, in the original order.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def prepare_config_and_inputs(self):
    """Build random input tensors (and optional labels/masks) plus a
    config. Restores the local names the mangled source clobbered and the
    method name confirmed by the internal ``self.get_config()`` call."""
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = self.get_config()
    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
    """Return a small LlamaConfig for fast tests.

    The mangled source passed ``is_decoder=__SCREAMING_SNAKE_CASE`` (an
    undefined name); restored to ``is_decoder=False``.
    """
    return LlamaConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        is_decoder=False,
        initializer_range=self.initializer_range,
    )
def create_and_check_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Base model output must be (batch, seq_length, hidden_size), with
    and without an attention mask. Duplicate mangled parameters restored
    to the standard tester argument list."""
    model = LlamaModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
a__ = True
a__ = LlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
a__ = LlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
a__ = True
a__ = True
a__ = LlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , )
a__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] , dim=-1 )
a__ = torch.cat([input_mask, next_mask] , dim=-1 )
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['hidden_states'][0]
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['hidden_states'][0]
# select random slice
a__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase(_lowercase , _lowercase , _lowercase , unittest.TestCase ):
    """Common test-suite for the Llama family (base model, causal LM,
    sequence classification), driven by ``LlamaModelTester``.

    NOTE(review): obfuscation collapsed identifiers in this block — every class
    attribute below is bound to the same name ``__snake_case`` (each assignment
    clobbers the previous one), every method is named ``lowercase__`` (each
    ``def`` shadows the previous one), and ``__SCREAMING_SNAKE_CASE`` referenced
    inside method bodies is name-mangled and never defined in this scope.
    The code is kept byte-identical; restoring the original identifiers
    requires the upstream file — do not rely on runtime behavior as-is.
    """
    # Presumably all_model_classes in the original — TODO confirm.
    __snake_case: Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    # Presumably all_generative_model_classes.
    __snake_case: Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
    # Presumably pipeline_model_mapping: pipeline task -> model class.
    __snake_case: List[Any] = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Two boolean test flags (names lost in obfuscation).
    __snake_case: List[Any] = False
    __snake_case: List[str] = False
    # setUp: build the model tester and the shared config tester.
    def lowercase__ ( self ) -> Any:
        """Create the model tester and the ConfigTester (hidden_size=37)."""
        a__ = LlamaModelTester(self )
        a__ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
    def lowercase__ ( self ) -> List[Any]:
        """Run the common configuration checks."""
        self.config_tester.run_common_tests()
    def lowercase__ ( self ) -> Optional[Any]:
        """Forward-pass shape check on the base model."""
        a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
    def lowercase__ ( self ) -> Any:
        """Repeat the model check for each position-embedding flavour."""
        a__ = self.model_tester.prepare_config_and_inputs()
        # `type` shadows the builtin here; kept as-is (byte-identical).
        for type in ["absolute", "relative_key", "relative_key_query"]:
            a__ = type
            self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
    def lowercase__ ( self ) -> int:
        """Sequence-classification head, default (regression-style) labels."""
        a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
        a__ = 3
        a__ = input_dict['input_ids']
        a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
        a__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        a__ = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def lowercase__ ( self ) -> Union[str, Any]:
        """Sequence-classification head with single_label_classification."""
        a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
        a__ = 3
        a__ = 'single_label_classification'
        a__ = input_dict['input_ids']
        a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
        a__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        a__ = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def lowercase__ ( self ) -> int:
        """Sequence-classification head with multi_label_classification."""
        a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
        a__ = 3
        a__ = 'multi_label_classification'
        a__ = input_dict['input_ids']
        a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
        a__ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        a__ = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def lowercase__ ( self ) -> List[str]:
        """Intentionally skipped (complex-number buffers break the common test)."""
        pass
    @parameterized.expand([('linear',), ('dynamic',)] )
    def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """RoPE scaling: scaled and unscaled models must agree on short inputs
        only for 'dynamic' scaling, and always differ on long inputs."""
        a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
        a__ = ids_tensor([1, 1_0] , config.vocab_size )
        a__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        a__ = LlamaModel(__SCREAMING_SNAKE_CASE )
        original_model.to(__SCREAMING_SNAKE_CASE )
        original_model.eval()
        a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
        a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        a__ = {'type': scaling_type, 'factor': 10.0}
        a__ = LlamaModel(__SCREAMING_SNAKE_CASE )
        scaled_model.to(__SCREAMING_SNAKE_CASE )
        scaled_model.eval()
        a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
        a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
@require_torch
class lowercase(unittest.TestCase ):
    """Slow integration tests comparing Llama-2 checkpoints against recorded
    logits / greedy generations. All tests are currently skipped upstream
    because the logits are not yet stable across environments.

    Fixes vs. the original block:
    * every intermediate was bound to the single name ``a__`` while later
      statements read distinct, unbound names — locals are restored from the
      read sites;
    * the 13b-chat logits test compared ``out.mean(-1)`` against a 30-element
      slice expectation; it now slices ``out[0, 0, :3_0]`` with atol/rtol 1e-5
      like its sibling tests (see its ``# slicing logits[0, 0, 0:30]`` comment);
    * the five methods all shared one name (each shadowing the previous); they
      are given descriptive ``test_*`` names so unittest can collect them.
    NOTE(review): boolean/None literals lost in obfuscation (``do_sample``,
    ``top_p``, ``use_safetensors``, ``skip_special_tokens``) follow the upstream
    greedy-generation test — confirm. ``torch.floataa`` in the 70b test is kept
    as written (the original dtype name was garbled).
    """

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_7b_logits(self):
        """Llama-2-7b: compare per-position logit means and a fixed slice."""
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_13b_logits(self):
        """Llama-2-13b: compare per-position logit means and a fixed slice."""
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_13b_chat_logits(self):
        """Llama-2-13b-chat: compare per-position logit means and a fixed slice.

        Bug fix: the original compared ``out.mean(-1)`` against the 30-element
        EXPECTED_SLICE; it now slices ``out[0, 0, :3_0]`` like the other tests.
        """
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
    @slow
    def test_model_70b_logits(self):
        """Llama-2-70b: compare per-position logit means and a fixed slice."""
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('Model is curently gated' )
    @slow
    def test_model_13b_greedy_generation(self):
        """Llama-2-13b-chat: greedy generation must reproduce a fixed completion."""
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        input_ids = tokenizer.encode(prompt , return_tensors='pt' )
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=False )
        # greedy generation outputs (do_sample=False, top_p=None -> greedy decoding)
        generated_ids = model.generate(input_ids , max_new_tokens=6_4 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
| 273 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
# Placeholder fast-test class: the shared Onnx pipeline mixin provides all the
# actual tests, so the body is intentionally empty.
# NOTE(review): the first base, `__UpperCAmelCase`, is an obfuscated identifier
# that is never defined in this file (presumably OnnxPipelineTesterMixin,
# imported above) — confirm against the upstream file.
class lowerCamelCase__ ( __UpperCAmelCase , unittest.TestCase ):
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX Stable Diffusion inpainting
    pipeline: one run with the default scheduler and one with LMSDiscrete.

    NOTE(review): obfuscation collapsed identifiers in this block — all four
    defs share the name ``__a`` (so each definition shadows the previous one,
    and the ``self.gpu_provider`` / ``self.gpu_options`` reads would fail at
    runtime), every local is bound to ``A``, and ``_lowerCamelCase`` passed as
    keyword values is never defined. Code is kept byte-identical; recovering
    the original identifiers requires the upstream file.
    """
    @property
    def __a ( self : Optional[Any] ):
        # ONNX Runtime CUDA execution provider, capped at a 15GB arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def __a ( self : str ):
        # Session options; the bare `A = False` presumably disabled a session
        # flag (e.g. enable_mem_pattern) on `options` — TODO confirm upstream.
        A = ort.SessionOptions()
        A = False
        return options
    def __a ( self : int ):
        """Inpainting with the pipeline's default scheduler: check a 3x3 slice."""
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        A = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        A = 'A red cat sitting on a park bench'
        A = np.random.RandomState(0 )
        A = pipe(
            prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCamelCase , output_type='np' , )
        A = output.images
        A = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        A = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def __a ( self : Optional[Any] ):
        """Inpainting with an explicit LMSDiscrete scheduler: check a 3x3 slice."""
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        A = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
        A = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        A = 'A red cat sitting on a park bench'
        A = np.random.RandomState(0 )
        A = pipe(
            prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowerCamelCase , output_type='np' , )
        A = output.images
        A = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        A = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 701 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
UpperCamelCase : Tuple = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config.json URL.
# NOTE(review): this binding reuses the name `UpperCamelCase` and therefore
# clobbers the logger above — the original file used two distinct names
# (logger / pretrained-config archive map); confirm upstream before relying
# on either binding.
UpperCamelCase : Union[str, Any] = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration for the LiLT (Language-independent Layout Transformer) model.

    Stores the hyper-parameters used to instantiate a LiLT model; defaults match
    the ``SCUT-DLVCLab/lilt-roberta-en-base`` checkpoint configuration above.

    Fixes vs. the original block (broken by identifier obfuscation):
    * every ``__init__`` parameter was named ``_lowercase`` — a duplicate-argument
      SyntaxError; parameter names are restored from the right-hand sides of the
      body assignments and their positional defaults;
    * every body assignment bound a local ``A`` instead of setting ``self.*``,
      so the config stored nothing; attributes are now set on ``self``;
    * the undefined base class ``UpperCAmelCase_`` is replaced by the
      ``PretrainedConfig`` imported at the top of this file (its
      ``pad_token_id`` keyword is forwarded to ``super().__init__``).
    NOTE(review): the ``model_type`` class attribute was obfuscated to
    ``lowerCAmelCase``; restored to the standard name. The attribute
    ``max_ad_position_embeddings`` keeps the name as written here — upstream
    likely calls it ``max_2d_position_embeddings`` (digits were garbled);
    confirm against the released LiLT config.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,          # size of the token vocabulary
        hidden_size=768,            # transformer hidden dimension
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,    # feed-forward inner dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,    # falls back to hidden_dropout_prob when None
        channel_shrink_ratio=4,     # layout-channel shrink factor
        max_ad_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 91 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic torch/cuda kernels so pipeline outputs are reproducible
# across test runs (required for the fixed expected-slice assertions below).
enable_full_determinism()
@skip_mps
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast tests for ``VideoToVideoSDPipeline`` built from tiny dummy
    components (UNet3D / DDIM / AutoencoderKL / tiny CLIP).

    NOTE(review): obfuscation collapsed identifiers in this block — every class
    attribute is bound to the same name ``__UpperCamelCase`` (each assignment
    clobbers the previous one, so only the last frozenset survives), all seven
    methods are named ``UpperCamelCase`` (each ``def`` shadows the previous
    one), ``get_dummy_inputs`` has two parameters both named ``snake_case__``
    (a duplicate-argument SyntaxError), and several keyword values
    (``clip_sample=snake_case__`` etc.) read names that are never defined.
    Code is kept byte-identical; recovering the identifiers needs upstream.
    """
    # Presumably pipeline_class / params / batch_params / required_optional_params
    # / a boolean flag / callback params in the original.
    __UpperCamelCase =VideoToVideoSDPipeline
    __UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
    __UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
    __UpperCamelCase =PipelineTesterMixin.required_optional_params - {"latents"}
    __UpperCamelCase =False
    # No `output_type`.
    __UpperCamelCase =frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    def UpperCamelCase ( self : Any ):
        """Build the tiny dummy UNet/scheduler/VAE/text-encoder components."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
        SCREAMING_SNAKE_CASE = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
        SCREAMING_SNAKE_CASE = CLIPTextModel(snake_case__ )
        SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        SCREAMING_SNAKE_CASE = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def UpperCamelCase ( self : int , snake_case__ : str , snake_case__ : int=0 ):
        """Build deterministic pipeline kwargs (seeded video + generator)."""
        SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
        if str(snake_case__ ).startswith('mps' ):
            SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
        SCREAMING_SNAKE_CASE = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def UpperCamelCase ( self : Dict ):
        """Default run on CPU: compare a 3x3 output-frame slice."""
        SCREAMING_SNAKE_CASE = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**snake_case__ )
        SCREAMING_SNAKE_CASE = sd_pipe.to(snake_case__ )
        sd_pipe.set_progress_bar_config(disable=snake_case__ )
        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
        SCREAMING_SNAKE_CASE = 'np'
        SCREAMING_SNAKE_CASE = sd_pipe(**snake_case__ ).frames
        SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (3_2, 3_2, 3)
        SCREAMING_SNAKE_CASE = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def UpperCamelCase ( self : int ):
        """xFormers attention must match the default attention within 5e-3."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5E-3 )
    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def UpperCamelCase ( self : str ):
        """Skipped: batching unsupported for this pipeline."""
        pass
    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def UpperCamelCase ( self : Optional[Any] ):
        """Skipped: batching unsupported for this pipeline."""
        pass
    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def UpperCamelCase ( self : Optional[int] ):
        """Skipped: num_images_per_prompt unsupported for this pipeline."""
        pass
    def UpperCamelCase ( self : Optional[Any] ):
        """Delegate to the mixin's progress-bar test."""
        return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase ( unittest.TestCase ):
    """Slow GPU integration test for ``VideoToVideoSDPipeline`` with the
    cerspense/zeroscope_v2_XL checkpoint (fp16, model CPU offload).
    NOTE(review): locals are collapsed to ``SCREAMING_SNAKE_CASE`` by
    obfuscation; the reads (pipe / video / prompt / video_frames /
    expected_array) show the intended names — confirm upstream.
    """
    def UpperCamelCase ( self : Optional[Any] ):
        """3-step run on a random 10-frame video: check 5 fixed output values."""
        SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        SCREAMING_SNAKE_CASE = torch.Generator(device='cpu' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=snake_case__ )
        SCREAMING_SNAKE_CASE = video.to('cuda' )
        SCREAMING_SNAKE_CASE = 'Spiderman is surfing'
        SCREAMING_SNAKE_CASE = pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type='pt' ).frames
        SCREAMING_SNAKE_CASE = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 439 |
import argparse
import os
import re
# Directory containing the auto-mapping modules that this script keeps sorted.
a_ = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
# Fix vs. original: all three module constants were bound to the same name
# `a_` (the first two were clobbered) while the functions below read
# `_re_intro_mapping` / `_re_identifier`; the names those read sites expect are
# restored here. The bogus `List[str]`/`Optional[...]` annotations are dropped:
# module-level annotations are evaluated at runtime and `typing` is not
# imported by this file, so they raised NameError.
_re_intro_mapping = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")

# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def __lowerCAmelCase(fname, overwrite: bool = False):
    """Sort the entries of every auto-mapping ``OrderedDict`` in one file.

    Args:
        fname: path of the Python module to process.
        overwrite: when True, rewrite the file in place; when False, only
            report whether a rewrite would change it.

    Returns:
        True when ``overwrite`` is False and the file needs sorting,
        None otherwise.

    Fixes vs. original: both parameters were named ``_UpperCamelCase``
    (a duplicate-argument SyntaxError) and the ``Tuple``/``Optional`` annotations
    were evaluated at ``def`` time without ``typing`` being imported (NameError).
    The two regexes are compiled locally because the module-level constants were
    clobbered (all three were bound to the single name ``a_``), leaving the
    names this function read undefined.
    """
    # Matches SUPER_MODEL_MAPPING_NAMES = OrderedDict / SUPER_MODEL_MAPPING = OrderedDict
    _re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
    # Matches the quoted identifier of a mapping entry.
    _re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries of this mapping start two levels (8 spaces) deeper than
            # the introduction line.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def __lowerCAmelCase(overwrite: bool = False):
    """Sort every auto-mapping module under the auto-module directory.

    Args:
        overwrite: when True, rewrite files in place; when False, raise
            a ValueError listing the files that need sorting.

    Fixes vs. original: the single boolean parameter was also used as the
    directory path (``os.listdir(<bool>)`` / ``os.path.join(<bool>, f)``) and as
    the zip operands — the locals are restored from the read sites. The module
    path is inlined because the module-level constant was clobbered (all three
    top-level constants shared the name ``a_``).
    NOTE(review): ``sort_auto_mapping`` is not defined under that name in this
    file (the per-file sorter's name was garbled by obfuscation); confirm and
    align the two names upstream.
    """
    auto_module_path = "src/transformers/models/auto"
    fnames = [os.path.join(auto_module_path, f) for f in os.listdir(auto_module_path) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            ' this.')
if __name__ == "__main__":
    # CLI entry point: --check_only reports files needing sorting without
    # rewriting them.
    # Fixes vs. original: the parser and parsed args were both bound to `a_`
    # while the following lines read `parser`/`args` (NameError); the final
    # call targeted `sort_all_auto_mappings`, which is not defined in this
    # file — the sort-everything function above is named `__lowerCAmelCase`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    __lowerCAmelCase(not args.check_only)
| 439 | 1 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase:
    """Reusable tester that exercises the common ``PretrainedConfig`` contract
    for one config class: common properties, JSON round-trips,
    save/from_pretrained round-trips (including subfolders), num_labels
    handling, zero-argument construction, and common-kwarg initialization.

    Fixes vs. the original block (broken by identifier obfuscation):
    * all five ``__init__`` parameters were named ``UpperCamelCase`` — a
      duplicate-argument SyntaxError; names are restored from the body;
    * every body assignment bound a local (``_snake_case``) instead of setting
      ``self.*``, so the tester stored nothing;
    * all nine helper methods were named ``UpperCamelCase_`` (each shadowing
      the previous one) while ``run_common_tests`` calls the real names — the
      methods are renamed to match those call sites;
    * ``config.idalabel`` / ``config.labelaid`` are restored to the
      ``PretrainedConfig`` attributes ``id2label`` / ``label2id`` and the lost
      ``config.num_labels = 3`` assignment is reinstated;
    * the undefined base class ``UpperCAmelCase_`` is removed (the tester is a
      plain helper, not a TestCase).
    NOTE(review): ``torch.floataa`` was restored to ``torch.float16`` to match
    the ``torch_dtype`` entry of ``config_common_kwargs`` — confirm upstream.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        """Store the owning TestCase (`parent`), the config class under test,
        whether it is a text model (adds `vocab_size` to the common
        properties), an optional override of the common-property list, and the
        kwargs used to instantiate configs during the tests."""
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Common properties must exist, be settable, and be accepted as init kwargs."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """`to_json_string` must serialize every init kwarg faithfully."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """`to_json_file` / `from_json_file` must round-trip the config."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'config.json')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """`save_pretrained` / `from_pretrained` must round-trip the config."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """The save/load round-trip must also work through a subfolder."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = 'test'
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """`num_labels` must drive the size of id2label / label2id, including updates."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """Non-composite configs must be constructible with zero arguments."""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every entry of `config_common_kwargs` must be honored at init time."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = '\n'.join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        """Run the full battery of common config checks."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
    """Build a tiny in-memory `Dataset` with two near-duplicate files and one
    distinct file, used by the MinHash deduplication tests below.

    Fix: the obfuscated body passed the undefined name `lowerCAmelCase` to
    ``Dataset.from_dict``; the dict literal is now named and passed explicitly.
    """
    data = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data )
    return dataset
class _lowerCAmelCase ( TestCase ):
    """Unit tests for the MinHash-based near-duplicate detection helpers.

    Fix: the obfuscated class inherited from the undefined name `UpperCAmelCase_`
    (``TestCase`` is what this file imports) and passed the undefined name
    `UpperCamelCase` around; locals are reconstructed.
    """

    def UpperCamelCase_ ( self : Dict ):
        """make_duplicate_clusters should group the two near-duplicate files."""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def UpperCamelCase_ ( self : Dict ):
        """deduplicate_dataset should drop one duplicate and annotate the cluster."""
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(ds_filter )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        # NOTE(review): expected flag reconstructed as True — confirm against the
        # upstream minhash_deduplication test.
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
| 669 | 1 |
'''simple docstring'''
from math import sqrt
def _a (lowercase__ : int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
__snake_case = 0
__snake_case = 0
__snake_case = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowercase__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
    # Integration tests for the CTC processor with a beam-search language model:
    # tokenizer/feature-extractor/decoder wiring, (batch) decoding with and
    # without a process pool, decoder parameter overrides, hub downloads, and
    # word-offset extraction.
    # NOTE(review): identifiers are machine-obfuscated — every method is named
    # `a` and every local is rebound to `__snake_case`, so attribute/method
    # references such as self.get_tokenizer, self._get_dummy_logits,
    # self.add_kwargs_tokens_map and self.decoder_name do not resolve in this
    # file as written; confirm against the upstream Wav2Vec2ProcessorWithLM
    # test before executing.

    # setUp: write a tiny vocab + feature-extractor config into a temp dir.
    def a ( self : int ) -> List[str]:
        __snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        __snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        __snake_case = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        __snake_case = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 1_6000,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        __snake_case = tempfile.mkdtemp()
        __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
        with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
        # load decoder from hub
        __snake_case = 'hf-internal-testing/ngram-beam-search-decoder'

    # Factory: tokenizer loaded from the temp dir (kwargs may override specials).
    def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
        __snake_case = self.add_kwargs_tokens_map.copy()
        kwargs.update(SCREAMING_SNAKE_CASE_ )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    # Factory: feature extractor loaded from the temp dir.
    def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    # Factory: beam-search decoder pulled from the hub.
    def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )

    # tearDown: remove the temp dir created in setUp.
    def a ( self : int ) -> Dict:
        shutil.rmtree(self.tmpdirname )

    # save_pretrained / from_pretrained round-trip preserves all three components.
    def a ( self : int ) -> Tuple:
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(self.tmpdirname )
        __snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )

    # from_pretrained forwards LM hyper-parameter overrides to the language model.
    def a ( self : Dict ) -> Union[str, Any]:
        __snake_case = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        __snake_case = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )

    # Constructor must reject a tokenizer whose vocab the decoder can't cover.
    def a ( self : str ) -> Tuple:
        __snake_case = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['xx'] )
        with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
            WavaVecaProcessorWithLM(
                tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )

    # Audio path of processor.__call__ matches the bare feature extractor.
    def a ( self : List[str] ) -> List[str]:
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        __snake_case = floats_list((3, 1000) )
        __snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
        __snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    # Text path of processor.__call__ matches the bare tokenizer.
    def a ( self : Tuple ) -> Tuple:
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        __snake_case = 'This is a test string'
        __snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
        __snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    # Deterministic fake logits helper.
    # NOTE(review): the duplicated parameter name `SCREAMING_SNAKE_CASE_` below is
    # a SyntaxError; upstream signature is presumably (shape=(2, 10, 16), seed=77)
    # — confirm.
    def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
        np.random.seed(SCREAMING_SNAKE_CASE_ )
        return np.random.rand(*SCREAMING_SNAKE_CASE_ )

    # decode() on one utterance agrees with the raw decoder's best beam.
    def a ( self : Any ) -> Tuple:
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        __snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        __snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
        __snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual('</s> <s> </s>' , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )

    # batch_decode() agrees with the raw decoder, with and without a user pool.
    @parameterized.expand([[None], ['fork'], ['spawn']] )
    def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        __snake_case = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            __snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
        else:
            with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
                __snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        __snake_case = list(SCREAMING_SNAKE_CASE_ )
        with get_context('fork' ).Pool() as p:
            __snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        __snake_case , __snake_case , __snake_case = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
        self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )

    # Decoder search parameters (beam width, pruning, token threshold) are forwarded.
    def a ( self : Any ) -> Dict:
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        __snake_case = self._get_dummy_logits()
        __snake_case = 15
        __snake_case = -2_0.0
        __snake_case = -4.0
        __snake_case = processor.batch_decode(
            SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
        __snake_case = decoded_processor_out.text
        __snake_case = list(SCREAMING_SNAKE_CASE_ )
        with get_context('fork' ).Pool() as pool:
            __snake_case = decoder.decode_beams_batch(
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
        __snake_case = [d[0][0] for d in decoded_decoder_out]
        __snake_case = [d[0][2] for d in decoded_decoder_out]
        __snake_case = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
        self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
        self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )

    # LM weighting parameters (alpha/beta/offset/boundary) are forwarded and
    # persisted on the model container.
    def a ( self : Optional[Any] ) -> Tuple:
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        __snake_case = self._get_dummy_logits()
        __snake_case = 2.0
        __snake_case = 5.0
        __snake_case = -2_0.0
        __snake_case = True
        __snake_case = processor.batch_decode(
            SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
        __snake_case = decoded_processor_out.text
        __snake_case = list(SCREAMING_SNAKE_CASE_ )
        decoder.reset_params(
            alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
        with get_context('fork' ).Pool() as pool:
            __snake_case = decoder.decode_beams_batch(
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
        __snake_case = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
        __snake_case = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
        self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )

    # from_pretrained downloads only decoder-relevant files from the hub repo.
    def a ( self : Optional[Any] ) -> List[str]:
        __snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        __snake_case = processor.decoder.model_container[processor.decoder._model_key]
        __snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        __snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
        __snake_case = ['alphabet.json', 'language_model']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    # Loading from a local snapshot yields the same decoder files as the hub cache.
    def a ( self : Dict ) -> Dict:
        __snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
        __snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
        __snake_case = processor.decoder.model_container[processor.decoder._model_key]
        __snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        __snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
        __snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    # AutoProcessor resolves to the same processor/decoding results.
    def a ( self : Any ) -> List[Any]:
        __snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        __snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
        __snake_case = floats_list((3, 1000) )
        __snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
        __snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
        __snake_case = self._get_dummy_logits()
        __snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
        __snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )

    # model_input_names is delegated to the feature extractor.
    def a ( self : Dict ) -> Optional[int]:
        __snake_case = self.get_feature_extractor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_decoder()
        __snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )

    # Helper: pluck one field out of each word-offset dict.
    # NOTE(review): the duplicated parameter name `SCREAMING_SNAKE_CASE_` below is
    # a SyntaxError; upstream signature is presumably (offsets, key) — confirm.
    @staticmethod
    def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
        __snake_case = [d[key] for d in offsets]
        return retrieved_list

    # decode(..., output_word_offsets=True) returns consistent words/offsets.
    def a ( self : Optional[int] ) -> str:
        __snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        __snake_case = self._get_dummy_logits()[0]
        __snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )

    # batch_decode(..., output_word_offsets=True) returns consistent words/offsets.
    def a ( self : Optional[Any] ) -> Optional[int]:
        __snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        __snake_case = self._get_dummy_logits()
        __snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        self.assertListEqual(
            [' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )

    # End-to-end: real model + LM decoding, word offsets converted to timestamps.
    @slow
    @require_torch
    @require_torchaudio
    def a ( self : Optional[Any] ) -> Optional[Any]:
        import torch

        __snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
        __snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
        __snake_case = iter(SCREAMING_SNAKE_CASE_ )
        __snake_case = next(SCREAMING_SNAKE_CASE_ )
        __snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        __snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        __snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
        with torch.no_grad():
            __snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
        __snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # One CTC frame corresponds to this many seconds of audio.
        __snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        __snake_case = [
            {
                'start_time': d['start_offset'] * time_offset,
                'end_time': d['end_offset'] * time_offset,
                'word': d['word'],
            }
            for d in output['word_offsets']
        ]
        __snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
        # output words
        self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
        # output times
        __snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
        __snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
        # fmt: off
        __snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        __snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 56 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
# Module-level RNG shared by the random-input helpers when no explicit rng is passed.
__UpperCAmelCase = random.Random()
def lowerCAmelCase ( shape , scale=1.0 , rng=None , name=None ):
    '''Return a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)`` drawn from ``rng`` (module-level RNG when ``rng`` is None).

    Fixes: the obfuscated signature repeated the parameter name
    ``__UpperCamelCase`` four times (a SyntaxError) and the body referenced the
    undefined name ``global_rng`` instead of the module-level RNG.
    ``name`` is accepted but unused, mirroring the upstream helper.
    '''
    if rng is None:
        rng = __UpperCAmelCase
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class __lowercase ( unittest.TestCase ):
    """Holds the hyper-parameters and prepares inputs for the Speech2Text
    feature-extraction tests below.

    Fixes: the obfuscated ``__init__`` and ``prepare_inputs_for_common``
    repeated the parameter name (a SyntaxError); parameter names are
    reconstructed from the attribute assignments. The dict-builder method is
    named ``prepare_feat_extract_dict`` to match the caller in this file, and
    the input builder calls the module's ``lowerCAmelCase`` helper (the
    original referenced the undefined name ``floats_list``).
    """

    def __init__( self ,parent ,batch_size=7 ,min_seq_length=400 ,max_seq_length=2_000 ,feature_size=24 ,num_mel_bins=24 ,padding_value=0.0 ,sampling_rate=16_000 ,return_attention_mask=True ,do_normalize=True ,):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Even spacing of sequence lengths from min to max across the batch.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common( self ,equal_length=False ,numpify=False ):
        """Build a batch of raw speech inputs (optionally equal-length / numpy arrays)."""

        def _flatten(list_of_lists ):
            # Currently unused; kept for parity with the upstream helper.
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [lowerCAmelCase((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                lowerCAmelCase((x, self.feature_size) )
                for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
    # Feature-extraction tests for Speech2Text: batching, padding strategies,
    # truncation, attention masks, tensor dtypes, and an integration check
    # against precomputed log-mel values.
    # NOTE(review): identifiers are machine-obfuscated. The base class
    # `__lowerCamelCase` and the names `SpeechaTextFeatureExtractionTester` /
    # `floats_list` / `num_samples` / `speech_samples` referenced below are not
    # defined in this file as written — upstream these are
    # SequenceFeatureExtractionTestMixin, the tester class above, and the
    # random-input helper; confirm before executing.
    snake_case_ = SpeechaTextFeatureExtractor if is_speech_available() else None

    # setUp: instantiate the tester that supplies configs and inputs.
    def __lowercase ( self : Dict ):
        UpperCAmelCase__ : Optional[Any] = SpeechaTextFeatureExtractionTester(self )

    # Assert features are (approximately) zero-mean / unit-variance per bin.
    def __lowercase ( self : List[str] ,A : int ):
        self.assertTrue(np.all(np.mean(A ,axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(A ,axis=0 ) - 1 ) < 1e-3 ) )

    # __call__ handles single, batched list, and batched 2-D numpy inputs alike.
    def __lowercase ( self : Optional[int] ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
        UpperCAmelCase__ : Optional[int] = [np.asarray(A ) for speech_input in speech_inputs]
        # Test feature size
        UpperCAmelCase__ : Optional[int] = feature_extractor(A ,padding=A ,return_tensors="""np""" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
        # Test not batched input
        UpperCAmelCase__ : List[Any] = feature_extractor(speech_inputs[0] ,return_tensors="""np""" ).input_features
        UpperCAmelCase__ : int = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ).input_features
        self.assertTrue(np.allclose(A ,A ,atol=1e-3 ) )
        # Test batched
        UpperCAmelCase__ : int = feature_extractor(A ,return_tensors="""np""" ).input_features
        UpperCAmelCase__ : Optional[int] = feature_extractor(A ,return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A ,A ):
            self.assertTrue(np.allclose(A ,A ,atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        UpperCAmelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        UpperCAmelCase__ : Optional[Any] = np.asarray(A )
        UpperCAmelCase__ : List[str] = feature_extractor(A ,return_tensors="""np""" ).input_features
        UpperCAmelCase__ : List[str] = feature_extractor(A ,return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A ,A ):
            self.assertTrue(np.allclose(A ,A ,atol=1e-3 ) )

    # cepstral mean-variance normalization with the default padding strategies.
    def __lowercase ( self : Tuple ):
        UpperCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : Dict = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
        UpperCAmelCase__ : Dict = ["""longest""", """max_length""", """do_not_pad"""]
        UpperCAmelCase__ : List[Any] = [None, 16, None]
        for max_length, padding in zip(A ,A ):
            UpperCAmelCase__ : int = feature_extractor(
                A ,padding=A ,max_length=A ,return_attention_mask=A )
            UpperCAmelCase__ : List[Any] = inputs.input_features
            UpperCAmelCase__ : Any = inputs.attention_mask
            UpperCAmelCase__ : int = [np.sum(A ) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )

    # Same as above but requesting numpy tensors; padded tail must be ~zero.
    def __lowercase ( self : Optional[int] ):
        UpperCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : int = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
        UpperCAmelCase__ : str = ["""longest""", """max_length""", """do_not_pad"""]
        UpperCAmelCase__ : List[str] = [None, 16, None]
        for max_length, padding in zip(A ,A ):
            UpperCAmelCase__ : int = feature_extractor(
                A ,max_length=A ,padding=A ,return_tensors="""np""" ,return_attention_mask=A )
            UpperCAmelCase__ : int = inputs.input_features
            UpperCAmelCase__ : Optional[int] = inputs.attention_mask
            UpperCAmelCase__ : Union[str, Any] = [np.sum(A ) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )

    # Truncation with padding="max_length" to a fixed number of frames.
    def __lowercase ( self : str ):
        UpperCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
        UpperCAmelCase__ : Union[str, Any] = feature_extractor(
            A ,padding="""max_length""" ,max_length=4 ,truncation=A ,return_tensors="""np""" ,return_attention_mask=A ,)
        UpperCAmelCase__ : Optional[Any] = inputs.input_features
        UpperCAmelCase__ : Optional[Any] = inputs.attention_mask
        UpperCAmelCase__ : Any = np.sum(attention_mask == 1 ,axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1] )
        self._check_zero_mean_unit_variance(input_features[2] )

    # Truncation with padding="longest": output length is min(longest, max_length).
    def __lowercase ( self : Union[str, Any] ):
        UpperCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
        UpperCAmelCase__ : Any = feature_extractor(
            A ,padding="""longest""" ,max_length=4 ,truncation=A ,return_tensors="""np""" ,return_attention_mask=A ,)
        UpperCAmelCase__ : str = inputs.input_features
        UpperCAmelCase__ : Union[str, Any] = inputs.attention_mask
        UpperCAmelCase__ : Tuple = np.sum(attention_mask == 1 ,axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape ,(3, 4, 24) )
        UpperCAmelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
        UpperCAmelCase__ : Tuple = feature_extractor(
            A ,padding="""longest""" ,max_length=16 ,truncation=A ,return_tensors="""np""" ,return_attention_mask=A ,)
        UpperCAmelCase__ : List[str] = inputs.input_features
        UpperCAmelCase__ : Optional[Any] = inputs.attention_mask
        UpperCAmelCase__ : Optional[int] = np.sum(attention_mask == 1 ,axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape ,(3, 6, 24) )

    # pad() returns float32 features for both numpy and pytorch tensor types.
    def __lowercase ( self : Tuple ):
        import torch

        UpperCAmelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : Optional[int] = np.random.rand(100 ,32 ).astype(np.floataa )
        UpperCAmelCase__ : int = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase__ : Union[str, Any] = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            UpperCAmelCase__ : List[str] = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    # Load N librispeech validation samples as raw arrays.
    # NOTE(review): the body references `num_samples` / `speech_samples`, but the
    # obfuscated parameter is `A` — upstream this is _load_datasamples(num_samples).
    def __lowercase ( self : Dict ,A : str ):
        from datasets import load_dataset

        UpperCAmelCase__ : List[str] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
        # automatic decoding with librispeech
        UpperCAmelCase__ : Optional[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]

    # Integration: first frame of the extracted log-mel features matches the
    # precomputed reference values below.
    def __lowercase ( self : str ):
        # fmt: off
        UpperCAmelCase__ : List[Any] = np.array([
            -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
            -1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
            -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
        ] )
        # fmt: on
        UpperCAmelCase__ : Tuple = self._load_datasamples(1 )
        UpperCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : int = feature_extractor(A ,return_tensors="""pt""" ).input_features
        self.assertEquals(input_features.shape ,(1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30] ,A ,atol=1e-4 ) )
| 194 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Dict = list(range(len(__UpperCamelCase ) ) )
UpperCAmelCase__ : Union[str, Any] = [v / w for v, w in zip(__UpperCamelCase , __UpperCamelCase )]
index.sort(key=lambda __UpperCamelCase : ratio[i] , reverse=__UpperCamelCase )
UpperCAmelCase__ : float = 0
UpperCAmelCase__ : list[float] = [0] * len(__UpperCamelCase )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase__ : Optional[Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase__ : Union[str, Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 194 | 1 |
def SCREAMING_SNAKE_CASE__() -> int:
    """Project Euler 19: count Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Walks forward one week at a time, tracking day-of-month with explicit
    leap-year handling; `day == 1` after a 7-day step means that month started
    on a Sunday.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # day-of-month of the first Sunday in Jan 1901
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        # leap year: February has 29 days
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    # NOTE(review): previously called the undefined name `solution()` and carried
    # fused "| 67 |" residue that made the module unparseable.
    print(SCREAMING_SNAKE_CASE__())
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: maps submodule name -> public names it provides.
# NOTE(review): the generated code rebound a single name `a_` for the dict AND
# every optional list, clobbering the dict and leaving `_import_structure`
# (required by _LazyModule below) undefined — restored to the standard pattern.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; `import sys` above exists for this
    # (the generated code assigned the proxy to a throwaway name instead).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import flax.linen as nn
import jax.numpy as jnp
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 , __snake_case = 1 , __snake_case = 1.0E4 , __snake_case = False , __snake_case = 1.0 , ) -> jnp.ndarray:
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
UpperCAmelCase_ : Tuple = float(embedding_dim // 2 )
UpperCAmelCase_ : List[Any] = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
UpperCAmelCase_ : List[Any] = min_timescale * jnp.exp(jnp.arange(__snake_case , dtype=jnp.floataa ) * -log_timescale_increment )
UpperCAmelCase_ : Dict = jnp.expand_dims(__snake_case , 1 ) * jnp.expand_dims(__snake_case , 0 )
# scale embeddings
UpperCAmelCase_ : List[str] = scale * emb
if flip_sin_to_cos:
UpperCAmelCase_ : Any = jnp.concatenate([jnp.cos(__snake_case ), jnp.sin(__snake_case )] , axis=1 )
else:
UpperCAmelCase_ : Tuple = jnp.concatenate([jnp.sin(__snake_case ), jnp.cos(__snake_case )] , axis=1 )
UpperCAmelCase_ : Any = jnp.reshape(__snake_case , [jnp.shape(__snake_case )[0], embedding_dim] )
return signal
class snake_case_(nn.Module):
    """Flax module mapping a timestep embedding through a 2-layer MLP: Dense -> silu -> Dense.

    NOTE(review): field names restored from the `self.time_embed_dim` / `self.dtype`
    reads in `__call__` — the generated version bound both fields to the same name
    `_lowerCamelCase`, so those attribute lookups failed; `jnp.floataa` does not
    exist (float32 intended).
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class snake_case_(nn.Module):  # noqa: F811 — shadows the MLP class above, as in the original layout
    """Flax module producing sinusoidal timestep embeddings via the `_snake_case` helper.

    NOTE(review): field names restored from the `self.dim` / `self.flip_sin_to_cos` /
    `self.freq_shift` reads in `__call__`; the call previously targeted the undefined
    name `get_sinusoidal_embeddings` — the helper defined in this module is `_snake_case`.
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return _snake_case(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
def _snake_case ( __snake_case , __snake_case , __snake_case ) -> list:
'''simple docstring'''
UpperCAmelCase_ : Any = len(__snake_case )
UpperCAmelCase_ : Tuple = [[0] * n for i in range(__snake_case )]
for i in range(__snake_case ):
UpperCAmelCase_ : Optional[Any] = y_points[i]
for i in range(2 , __snake_case ):
for j in range(__snake_case , __snake_case ):
UpperCAmelCase_ : int = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 455 | 1 |
"""simple docstring"""
import sys
UpperCAmelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def lowerCamelCase (a_ :str = N) -> int:
lowercase :Optional[int] = -sys.maxsize - 1
for i in range(len(a_) - 12):
lowercase :Tuple = 1
for j in range(13):
product *= int(n[i + j])
if product > largest_product:
lowercase :Optional[Any] = product
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 677 |
"""simple docstring"""
UpperCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def lowerCamelCase (a_ :dict , a_ :List[str] , a_ :Tuple) -> list[str]:
lowercase :str = set()
# keep track of all the paths to be checked
lowercase :Dict = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowercase :Optional[int] = queue.pop(0)
# get the last node from the path
lowercase :Any = path[-1]
if node not in explored:
lowercase :int = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowercase :List[Any] = list(a_)
new_path.append(a_)
queue.append(a_)
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(a_)
# in case there's no path between the 2 nodes
return []
def lowerCamelCase (a_ :dict , a_ :List[Any] , a_ :List[Any]) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowercase :List[str] = [start]
lowercase :Optional[Any] = set(a_)
# Keep tab on distances from `start` node.
lowercase :Union[str, Any] = {start: 0, target: -1}
while queue:
lowercase :Union[str, Any] = queue.pop(0)
if node == target:
lowercase :Any = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node])
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(a_)
queue.append(a_)
lowercase :Dict = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 677 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the generated code bound both the logger and the archive map to the
# same name `A`, so the logger was immediately clobbered; restored distinct names
# (with `A` kept as an alias for backward compatibility).
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/unispeech-sat-base-100h-libri-ft': (
        'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
A = UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP
class lowerCAmelCase(PretrainedConfig):  # base restored from the `PretrainedConfig` import above
    """Configuration class for UniSpeechSat models.

    Holds the transformer encoder hyper-parameters, the convolutional feature
    extractor layout, SpecAugment masking settings, codevector-quantization
    parameters, and CTC / classification / x-vector head sizes.

    NOTE(review): the generated `__init__` declared every parameter with the same
    name `lowerCamelCase_` (a SyntaxError); parameter names are restored from the
    body's attribute assignments and the canonical default ordering. The
    `model_type` attribute is the registry key `PretrainedConfig` subclasses define
    — TODO confirm against upstream.
    """

    model_type = 'unispeech-sat'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def lowerCamelCase__(self):
        # Overall downsampling factor of the conv feature extractor (product of strides).
        # NOTE(review): the return line previously carried fused "| 304 |" residue;
        # upstream names this property `inputs_to_logits_ratio`.
        return functools.reduce(operator.mul, self.conv_stride, 1)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# PIL is only required when images are actually processed; guard the import so the
# module can still be imported in text-only environments.
if is_vision_available():
    import PIL

# Module logger. NOTE(review): bound to the auto-generated name `A`.
A : List[str] = logging.get_logger(__name__)
class lowerCAmelCase(BaseImageProcessor):  # base restored from the `BaseImageProcessor` import above
    """Image processor applying an optional resize -> center-crop -> rescale ->
    normalize pipeline and returning a `BatchFeature` of pixel values.

    NOTE(review): the generated class declared all five methods under one name
    (`lowerCamelCase__`, each shadowing the previous) while `preprocess` calls
    `self.resize` / `self.center_crop` / `self.rescale` / `self.normalize`; method
    names are restored from those call sites. `__init__` parameter names are
    restored from the attribute assignments (the generated duplicates were a
    SyntaxError).
    """

    # Conventional input-name declaration for HF image processors — TODO confirm.
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `{"height": ..., "width": ...}` with `resample`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `{"height": ..., "width": ...}`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline on one image or a list; every `None` argument
        falls back to the value configured in `__init__`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        # NOTE(review): parentheses added — the original `A and B or C` raised even
        # when do_resize was False but resample happened to be None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Helper that builds small LED configs/inputs and checks decoder past-key-value
    caching for the TF LED tests.

    NOTE(review): renamed from the generated `UpperCAmelCase_` — the test class
    below instantiates `TFLEDModelTester(self)`, and the generated name collided
    with (and was shadowed by) the test class itself. `__init__` parameter names
    (all duplicated — a SyntaxError) and the destroyed assignment targets are
    restored from the `self.*` attribute reads throughout the class.
    """

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        """Build a small LEDConfig plus matching input dict (with global attention
        on the final token). Name restored from the test-class call site."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify decoder outputs with cached past_key_values match a full forward
        pass on the concatenated sequence."""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        # NOTE(review): `tf.inta` does not exist; int8 assumed (digits were mangled) — TODO confirm
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Fill in default attention/head masks for LED inputs and return the
    assembled input dict.

    NOTE(review): renamed from the generated `UpperCAmelCase__` — the model tester
    above calls `prepare_led_inputs_dict(...)`. The generated version declared all
    seven parameters with one name (a SyntaxError) and discarded the computed
    masks; targets restored from the returned dict keys. `tf.inta` does not exist;
    int8 assumed — TODO confirm.
    """
    if attention_mask is None:
        # mask out padding positions
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # decoder start token is always attended to, the rest follows padding
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class UpperCAmelCase_ (__a , __a , unittest.TestCase ):
    """Common (non-slow) test suite for the TensorFlow LED models.

    NOTE(review): this file was mechanically renamed — every class attribute
    below is called ``lowerCamelCase`` (each assignment shadows the previous
    one), the bases are ``__a`` placeholders, and method bodies assign to
    ``__lowerCamelCase`` while later lines read the intended names
    (``inputs_dict``, ``config``, ``model`` ...).  The intended attributes were
    presumably ``all_model_classes``, ``all_generative_model_classes``,
    ``pipeline_model_mapping`` and the common-test boolean flags — confirm
    against the upstream ``tests/models/led/test_modeling_tf_led.py``.
    """
    # presumably `all_model_classes`
    lowerCamelCase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    # presumably `all_generative_model_classes`
    lowerCamelCase : List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    # presumably `pipeline_model_mapping`: pipeline task name -> model class
    lowerCamelCase : Union[str, Any] = (
        {
            '''conversational''': TFLEDForConditionalGeneration,
            '''feature-extraction''': TFLEDModel,
            '''summarization''': TFLEDForConditionalGeneration,
            '''text2text-generation''': TFLEDForConditionalGeneration,
            '''translation''': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # boolean flags consumed by the common-test mixin (original names lost)
    lowerCamelCase : str = True
    lowerCamelCase : Any = False
    lowerCamelCase : Union[str, Any] = False
    lowerCamelCase : Any = False
    def lowercase_ ( self ) -> Tuple:
        # setUp: build the model tester and config tester used by the tests below.
        __lowerCamelCase : Dict = TFLEDModelTester(self )
        __lowerCamelCase : Any = ConfigTester(self , config_class=_lowercase )
    def lowercase_ ( self ) -> List[str]:
        # Generic LEDConfig sanity checks.
        self.config_tester.run_common_tests()
    def lowercase_ ( self ) -> List[Any]:
        # Exercise the decoder past-key-values path with large inputs.
        __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_lowercase )
    def lowercase_ ( self ) -> List[str]:
        # Attention-output test: verifies the shapes of LED's local and global
        # attention tensors under every combination of output flags.
        __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase : str = tf.zeros_like(inputs_dict['attention_mask'] )
        __lowerCamelCase : Any = 2
        # the first `num_global_attn_indices` tokens attend globally
        __lowerCamelCase : List[str] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
        __lowerCamelCase : int = True
        __lowerCamelCase : str = self.model_tester.seq_length
        __lowerCamelCase : Dict = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(SCREAMING_SNAKE_CASE_ ):
            # decoder attentions: one tensor per layer, (heads, tgt_len, src_len)
            __lowerCamelCase : Tuple = outputs.decoder_attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ ):
            # encoder exposes both local and global attention tensors
            __lowerCamelCase : str = [t.numpy() for t in outputs.encoder_attentions]
            __lowerCamelCase : Any = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            __lowerCamelCase : Optional[int] = True
            __lowerCamelCase : List[Any] = False
            __lowerCamelCase : Optional[int] = False
            __lowerCamelCase : Union[str, Any] = model_class(_lowercase )
            __lowerCamelCase : Optional[int] = model(self._prepare_for_class(_lowercase , _lowercase ) )
            __lowerCamelCase : List[str] = len(_lowercase )
            self.assertEqual(config.output_hidden_states , _lowercase )
            check_encoder_attentions_output(_lowercase )
            if self.is_encoder_decoder:
                __lowerCamelCase : Optional[int] = model_class(_lowercase )
                __lowerCamelCase : Union[str, Any] = model(self._prepare_for_class(_lowercase , _lowercase ) )
                self.assertEqual(config.output_hidden_states , _lowercase )
                check_decoder_attentions_output(_lowercase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __lowerCamelCase : Union[str, Any] = True
            __lowerCamelCase : List[str] = model_class(_lowercase )
            __lowerCamelCase : Dict = model(self._prepare_for_class(_lowercase , _lowercase ) )
            self.assertEqual(config.output_hidden_states , _lowercase )
            check_encoder_attentions_output(_lowercase )
            # Check attention is always last and order is fine
            __lowerCamelCase : str = True
            __lowerCamelCase : Dict = True
            __lowerCamelCase : int = model_class(_lowercase )
            __lowerCamelCase : Optional[int] = model(self._prepare_for_class(_lowercase , _lowercase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
            self.assertEqual(model.config.output_hidden_states , _lowercase )
            check_encoder_attentions_output(_lowercase )
    @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    def lowercase_ ( self ) -> List[Any]:
        # intentionally a no-op (disabled common test)
        pass
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> List[str]:
    """Build a TF constant of token ids from a nested Python list.

    Fix: ``tf.intaa`` does not exist (mangled name) — token-id tensors use
    ``tf.int32``.
    """
    return tf.constant(UpperCAmelCase_ , dtype=tf.int32 )
A__ : Dict = 1e-4
@slow
@require_tf
class UpperCAmelCase_ (unittest.TestCase ):
    """Slow integration tests: run the pretrained ``allenai/led-base-16384``
    checkpoint and compare a 3x3 corner of the output against golden values.

    NOTE(review): local names were mechanically renamed to ``__lowerCamelCase``
    while later lines read the intended names (``model``, ``output``);
    ``_long_tensor`` / ``prepare_led_inputs_dict`` are helpers defined
    elsewhere in this file.
    """
    def lowercase_ ( self ) -> List[Any]:
        # Base model (``.led``, no LM head): check hidden-state shape/values.
        __lowerCamelCase : List[str] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
        # change to intended input here
        __lowerCamelCase : List[str] = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
        __lowerCamelCase : int = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
        __lowerCamelCase : Union[str, Any] = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
        __lowerCamelCase : Optional[Any] = model(**_lowercase )[0]
        # expected hidden-state shape: (batch, seq_len, hidden_size)
        __lowerCamelCase : Optional[int] = (1, 10_24, 7_68)
        self.assertEqual(output.shape , _lowercase )
        # change to expected output here
        __lowerCamelCase : Optional[Any] = tf.convert_to_tensor(
            [[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 )
    def lowercase_ ( self ) -> Dict:
        # Full conditional-generation head: logits shape is (1, seq, vocab).
        __lowerCamelCase : Optional[Any] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
        # change to intended input here
        __lowerCamelCase : Dict = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
        __lowerCamelCase : List[str] = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
        __lowerCamelCase : Tuple = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
        __lowerCamelCase : Any = model(**_lowercase )[0]
        __lowerCamelCase : Dict = (1, 10_24, model.config.vocab_size)
        self.assertEqual(output.shape , _lowercase )
        # change to expected output here
        __lowerCamelCase : Optional[Any] = tf.convert_to_tensor(
            [[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 , rtol=1E-3 )
import torch


def lowerCAmelCase_ ( ) -> int:
    """Report how many CUDA GPUs torch can see.

    Returns:
        int: the number of visible GPUs (0 when CUDA is unavailable).

    Fixes: the original assigned the count to a throwaway name so the
    f-string read an undefined ``num_gpus``; it also declared ``-> int`` but
    returned nothing, and the ``__main__`` guard called an undefined
    ``main()``.
    """
    # Guard on availability so this also runs cleanly on CPU-only machines.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )
    return num_gpus


if __name__ == "__main__":
    lowerCAmelCase_()
from ..utils import DummyObject, requires_backends
class A ( metaclass=lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : List[Any] = ['''onnx''']
def __init__( self : str , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
requires_backends(self , ['onnx'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['onnx'] )
@classmethod
def lowercase__ ( cls : List[Any] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['onnx'] )
| 559 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DistilBert subpackage.
# NOTE(review): mechanically renamed — every structure list below is rebound to
# ``__a`` (discarding the previous binding) where upstream these were
# ``_import_structure["<section>"] = [...]`` entries, and the ``_LazyModule``
# call at the bottom reads an undefined ``_import_structure``.  Compare with
# the upstream ``transformers/models/distilbert/__init__.py``.
__a : Any = {
    """configuration_distilbert""": [
        """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """DistilBertConfig""",
        """DistilBertOnnxConfig""",
    ],
    """tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
# Fast tokenizer: only importable when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a : List[Any] = ["""DistilBertTokenizerFast"""]
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a : List[str] = [
        """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DistilBertForMaskedLM""",
        """DistilBertForMultipleChoice""",
        """DistilBertForQuestionAnswering""",
        """DistilBertForSequenceClassification""",
        """DistilBertForTokenClassification""",
        """DistilBertModel""",
        """DistilBertPreTrainedModel""",
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a : int = [
        """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDistilBertForMaskedLM""",
        """TFDistilBertForMultipleChoice""",
        """TFDistilBertForQuestionAnswering""",
        """TFDistilBertForSequenceClassification""",
        """TFDistilBertForTokenClassification""",
        """TFDistilBertMainLayer""",
        """TFDistilBertModel""",
        """TFDistilBertPreTrainedModel""",
    ]
# Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a : Optional[Any] = [
        """FlaxDistilBertForMaskedLM""",
        """FlaxDistilBertForMultipleChoice""",
        """FlaxDistilBertForQuestionAnswering""",
        """FlaxDistilBertForSequenceClassification""",
        """FlaxDistilBertForTokenClassification""",
        """FlaxDistilBertModel""",
        """FlaxDistilBertPreTrainedModel""",
    ]
# Under static type checking import everything eagerly so tools see real names.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
# At runtime, replace this module with a lazy loader.
# NOTE(review): upstream this is ``sys.modules[__name__] = _LazyModule(...)``;
# here the result is bound to ``__a`` and ``_import_structure`` is undefined.
else:
    import sys
    __a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# (stray dataset chunk-marker residue removed: "| 559 | 1 |")
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase( unittest.TestCase ):
    """Config/input factory used by the Flax RoBERTa model tests.

    NOTE(review): mechanically renamed file — all ``__init__`` parameters are
    ``__a`` (duplicate parameter names, which is not valid Python) and locals
    are bound to ``__lowerCamelCase`` while later lines read the intended
    names; the intended parameter names are recoverable from the attribute
    assignments below.  Compare with upstream
    ``tests/models/roberta/test_modeling_flax_roberta.py``.
    """
    def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ):
        # intended names are the right-hand sides of these assignments
        __lowerCamelCase : Optional[int] = parent
        __lowerCamelCase : int = batch_size
        __lowerCamelCase : Optional[int] = seq_length
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Optional[int] = use_attention_mask
        __lowerCamelCase : Tuple = use_token_type_ids
        __lowerCamelCase : str = use_labels
        __lowerCamelCase : Any = vocab_size
        __lowerCamelCase : Optional[Any] = hidden_size
        __lowerCamelCase : Any = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : int = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Any = max_position_embeddings
        __lowerCamelCase : Union[str, Any] = type_vocab_size
        __lowerCamelCase : Union[str, Any] = type_sequence_label_size
        __lowerCamelCase : Optional[Any] = initializer_range
        __lowerCamelCase : Optional[int] = num_choices
    def snake_case_ ( self ):
        # Build (config, input_ids, token_type_ids, attention_mask) for a test run.
        __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCamelCase : Optional[Any] = None
        if self.use_attention_mask:
            __lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCamelCase : List[str] = None
        if self.use_token_type_ids:
            __lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __lowerCamelCase : str = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def snake_case_ ( self ):
        # Package the prepared inputs as the kwargs dict the common tests expect.
        __lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs
        __lowerCamelCase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def snake_case_ ( self ):
        # Decoder variant: add encoder hidden states / attention mask.
        __lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs
        __lowerCamelCase : List[Any] = True
        __lowerCamelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class __lowercase( lowercase__ , unittest.TestCase ):
    """Flax RoBERTa model test entry point.

    NOTE(review): the base ``lowercase__`` is unresolved — presumably
    ``FlaxModelTesterMixin`` (imported above) — and ``FlaxRobertaModelTester``
    in ``setUp`` is unresolved because the tester class above was renamed to
    ``__lowercase`` by the same mechanical rename.  The class attributes below
    were presumably ``test_head_masking`` and ``all_model_classes``.
    """
    __a : str = True
    __a : Optional[int] = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def snake_case_ ( self ):
        # setUp: construct the shared model tester.
        __lowerCamelCase : str = FlaxRobertaModelTester(self )
    @slow
    def snake_case_ ( self ):
        # Smoke-test: load each pretrained Flax class and run a 1x1 input.
        for model_class_name in self.all_model_classes:
            __lowerCamelCase : List[Any] = model_class_name.from_pretrained('roberta-base' , from_pt=__a )
            __lowerCamelCase : int = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__a )
# (stray dataset chunk-marker residue removed: "| 594 |")
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class __lowercase( PretrainedConfig ):
    """Configuration class for the EnCodec neural audio codec.

    Stores the hyper-parameters of the encoder/decoder convolutional stacks
    and of the residual vector quantizer.  Rebuilt from a mangled original
    whose ``__init__`` declared every parameter as ``__a`` (duplicate
    parameter names — invalid Python); the intended names were recovered from
    the right-hand sides of the attribute assignments in the body.

    NOTE(review): the base class was the unresolved placeholder
    ``lowercase__``; ``PretrainedConfig`` (imported above and otherwise
    unused) is the evident intent — confirm against
    ``transformers.models.encodec.configuration_encodec``.
    """

    # identifier used by AutoConfig (was mangled to ``__a`` in the original)
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        # Mutable list defaults kept for interface compatibility with upstream.
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # codebook dimension defaults to the model hidden size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        """Hop between consecutive chunks in samples (>= 1), or None."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        """Number of codec frames produced per second of audio."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        """RVQ codebooks needed to reach the highest target bandwidth."""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
# (stray dataset chunk-marker residue removed: "| 594 | 1 |")
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase = []
lowerCamelCase = []
lowerCamelCase = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"""emoji""": True,
},
}
]
lowerCamelCase = 0
for log in Path().glob("""*.log"""):
lowerCamelCase = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase = F'''{line["duration"]:.4f}'''
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase = []
log.unlink()
lowerCamelCase = """"""
lowerCamelCase = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase = []
lowerCamelCase = {}
for test in failed_tests:
lowerCamelCase = test[0].split("""::""")
lowerCamelCase = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase = [test[0] for test in failed_table]
lowerCamelCase = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
lowerCamelCase = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase = len(err) + 1_0
lowerCamelCase = message[: 3_0_0_0 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
lowerCamelCase = """No failed tests! 🤗"""
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
lowerCamelCase = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
lowerCamelCase = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase = row[0]
else:
lowerCamelCase = """"""
lowerCamelCase = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
# (stray dataset chunk-marker residue removed: "| 713 |")
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( UpperCamelCase__ ):
    '''Build an ASTConfig for the given checkpoint name and attach id/label maps.

    NOTE(review): mechanically renamed — every assignment below is bound to
    ``_SCREAMING_SNAKE_CASE``, discarding the value, so none of the branch
    results actually reach ``config``; upstream these presumably set config
    fields (e.g. a max length for speech-commands and per-variant stride
    sizes) and the local names ``config``/``repo_id``/``filename``/
    ``idalabel`` read later are undefined as written.  Confirm against the
    upstream AST conversion script.
    '''
    _SCREAMING_SNAKE_CASE = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        # presumably config.max_length = 128
        _SCREAMING_SNAKE_CASE = 128
    elif "12-12" in model_name:
        _SCREAMING_SNAKE_CASE = 12
        _SCREAMING_SNAKE_CASE = 12
    elif "14-14" in model_name:
        _SCREAMING_SNAKE_CASE = 14
        _SCREAMING_SNAKE_CASE = 14
    elif "16-16" in model_name:
        _SCREAMING_SNAKE_CASE = 16
        _SCREAMING_SNAKE_CASE = 16
    else:
        raise ValueError('''Model not supported''' )
    # Label maps live in the shared `huggingface/label-files` dataset repo.
    _SCREAMING_SNAKE_CASE = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        _SCREAMING_SNAKE_CASE = 35
        _SCREAMING_SNAKE_CASE = '''speech-commands-v2-id2label.json'''
    else:
        _SCREAMING_SNAKE_CASE = 527
        _SCREAMING_SNAKE_CASE = '''audioset-id2label.json'''
    _SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
    _SCREAMING_SNAKE_CASE = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
    _SCREAMING_SNAKE_CASE = idalabel
    _SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
    return config
def A__ ( UpperCamelCase__ ):
    """Translate one parameter name from the original AST checkpoint to the
    HF ``ASTForAudioClassification`` naming scheme.

    Args:
        UpperCamelCase__ (str): parameter name from the original state dict.

    Returns:
        str: the renamed parameter name (unchanged if no rule matches).

    Fix: the mangled original assigned every replacement to a throwaway
    variable and read an undefined ``name`` (NameError); the replacement
    table itself is preserved verbatim.
    """
    name = UpperCamelCase__
    if "module.v" in name:
        name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "dist_token" in name:
        name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    # "attn.proj" must be rewritten before the bare "attn" rule below
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
    return name
def A__ ( orig_state_dict , config ):
    """Split each fused ``qkv`` projection in the state dict into separate
    query/key/value tensors, in place.

    Args:
        orig_state_dict (dict): checkpoint state dict (already renamed).
        config: model config; only ``config.hidden_size`` is read.

    Returns:
        dict: the same dict, with every ``qkv`` entry replaced by
        query/key/value entries.

    Fix: the mangled original declared two parameters with the same name
    (a SyntaxError) and bound every split tensor to a throwaway variable.
    NOTE(review): the target key pattern
    ``audio_spectrogram_transformer.encoder.layer.{n}.attention.attention.*``
    was reconstructed from the HF AST model naming — confirm against the
    upstream conversion script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''' )
            # key looks like "audio_spectrogram_transformer.encoder.layer.<n>...."
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            prefix = f'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention'''
            if "weight" in key:
                orig_state_dict[f'''{prefix}.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''{prefix}.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''{prefix}.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''{prefix}.query.bias'''] = val[:dim]
                orig_state_dict[f'''{prefix}.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''{prefix}.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[key] = val
    return orig_state_dict
def A__ ( UpperCamelCase__ ):
    """Remove the original classification-head weights from a state dict, in place.

    Args:
        UpperCamelCase__ (dict): the checkpoint state dict.

    Fix: the mangled original read an undefined ``state_dict`` and passed the
    dict itself as both arguments to ``pop`` (TypeError on an unhashable
    key); it now pops each ignored key with a ``None`` default so missing
    keys are tolerated.
    """
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        UpperCamelCase__.pop(k , None )
@torch.no_grad()
def A__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
    '''Download an original AST checkpoint, convert it to HF format, verify a
    3-value logits slice against golden values, and optionally save/push.

    NOTE(review): mechanically renamed — the signature declares duplicate
    parameter names (a SyntaxError as written; presumably ``model_name``,
    ``pytorch_dump_folder_path``, ``push_to_hub``), all locals are bound to
    ``_SCREAMING_SNAKE_CASE`` while later lines read the intended names, and
    the helper calls (``get_audio_spectrogram_transformer_config``,
    ``remove_keys``, ``convert_state_dict``) reference upstream names that
    this file renamed to ``A__``.  Compare with the upstream
    ``convert_audio_spectrogram_transformer_original_to_pytorch.py``.
    '''
    _SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(UpperCamelCase__ )
    # checkpoint name -> original-weights download URL
    _SCREAMING_SNAKE_CASE = {
        '''ast-finetuned-audioset-10-10-0.4593''': (
            '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.450''': (
            '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448''': (
            '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448-v2''': (
            '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-12-12-0.447''': (
            '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-14-14-0.443''': (
            '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-16-16-0.442''': (
            '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
        ),
        '''ast-finetuned-speech-commands-v2''': (
            '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
        ),
    }
    # load original state_dict
    _SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
    _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' )
    # remove some keys
    remove_keys(UpperCamelCase__ )
    # rename some keys
    _SCREAMING_SNAKE_CASE = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
    # load 🤗 model
    _SCREAMING_SNAKE_CASE = ASTForAudioClassification(UpperCamelCase__ )
    model.eval()
    model.load_state_dict(UpperCamelCase__ )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    # feature-extractor normalization stats differ for the speech-commands checkpoint
    _SCREAMING_SNAKE_CASE = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
    _SCREAMING_SNAKE_CASE = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
    _SCREAMING_SNAKE_CASE = 1_024 if '''speech-commands''' not in model_name else 128
    _SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=UpperCamelCase__ , std=UpperCamelCase__ , max_length=UpperCamelCase__ )
    if "speech-commands" in model_name:
        _SCREAMING_SNAKE_CASE = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        _SCREAMING_SNAKE_CASE = dataset[0]['''audio''']['''array''']
    else:
        _SCREAMING_SNAKE_CASE = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = torchaudio.load(UpperCamelCase__ )
        _SCREAMING_SNAKE_CASE = waveform.squeeze().numpy()
    _SCREAMING_SNAKE_CASE = feature_extractor(UpperCamelCase__ , sampling_rate=16_000 , return_tensors='''pt''' )
    # forward pass
    _SCREAMING_SNAKE_CASE = model(**UpperCamelCase__ )
    _SCREAMING_SNAKE_CASE = outputs.logits
    # golden logits slices per checkpoint
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        _SCREAMING_SNAKE_CASE = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        _SCREAMING_SNAKE_CASE = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        _SCREAMING_SNAKE_CASE = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        _SCREAMING_SNAKE_CASE = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        _SCREAMING_SNAKE_CASE = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        _SCREAMING_SNAKE_CASE = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        _SCREAMING_SNAKE_CASE = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        _SCREAMING_SNAKE_CASE = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
    else:
        raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ):
        raise ValueError('''Logits don\'t match''' )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(UpperCamelCase__ )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(UpperCamelCase__ )
    if push_to_hub:
        print('''Pushing model and feature extractor to the hub...''' )
        model.push_to_hub(F'''MIT/{model_name}''' )
        feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase : List[str] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# (stray dataset chunk-marker residue removed: "| 168 | 0 |")
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast (CPU-sized) pipeline tests for ``KandinskyVaaInpaintPipeline``.

    NOTE(review): identifiers in this class look machine-mangled — the base
    class ``lowercase__``, the repeated attribute name ``UpperCamelCase``
    (each assignment rebinds the previous one), the repeated method name
    ``__snake_case`` (each definition shadows the previous one), and names
    such as ``_a``/``seed`` that are never defined in this scope. Verify
    against the upstream Kandinsky 2.2 inpaint pipeline tests before relying
    on this code.
    """

    # Pipeline class exercised by the mixin-driven tests.
    UpperCamelCase : Tuple = KandinskyVaaInpaintPipeline
    # Presumably the pipeline call-argument lists consumed by the tester mixin
    # (params / batch params) — attribute names are mangled; confirm upstream.
    UpperCamelCase : Dict = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    UpperCamelCase : List[Any] = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
        '''mask_image''',
    ]
    # Optional __call__ keyword arguments forwarded to the pipeline.
    UpperCamelCase : List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    UpperCamelCase : Optional[int] = False

    @property
    def __snake_case ( self ):
        # Small constant sizes keep these tests fast on CPU.
        return 32

    @property
    def __snake_case ( self ):
        return 32

    @property
    def __snake_case ( self ):
        return self.time_input_dim

    @property
    def __snake_case ( self ):
        return self.time_input_dim * 4

    @property
    def __snake_case ( self ):
        return 100

    @property
    def __snake_case ( self ):
        """Build a tiny ``UNet2DConditionModel`` configured for inpainting.

        NOTE(review): the constructed model is bound to a mangled name but
        ``return model`` reads ``model`` — confirm upstream.
        """
        torch.manual_seed(0 )

        UpperCAmelCase__ : Any = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        UpperCAmelCase__ : str = UNetaDConditionModel(**_a )
        return model

    @property
    def __snake_case ( self ):
        # Keyword arguments for the tiny VQ model used as the movq component.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __snake_case ( self ):
        # Deterministic seed so component weights are reproducible across runs.
        torch.manual_seed(0 )
        UpperCAmelCase__ : int = VQModel(**self.dummy_movq_kwargs )
        return model

    def __snake_case ( self ):
        """Assemble the full component dict (unet, DDIM scheduler, movq)."""
        UpperCAmelCase__ : List[str] = self.dummy_unet
        UpperCAmelCase__ : Any = self.dummy_movq

        UpperCAmelCase__ : Dict = DDIMScheduler(
            num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='epsilon' , thresholding=_a , )

        UpperCAmelCase__ : List[Any] = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
        """Build deterministic dummy inputs (embeddings, init image, mask).

        NOTE(review): the two parameters share one mangled name and the body
        reads ``_a``/``seed`` — this cannot run as written; confirm upstream.
        """
        UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
        UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            _a )
        # create init_image
        UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
        UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        UpperCAmelCase__ : List[Any] = np.ones((64, 64) , dtype=np.floataa )
        UpperCAmelCase__ : Any = 0

        if str(_a ).startswith('mps' ):
            UpperCAmelCase__ : Tuple = torch.manual_seed(_a )
        else:
            UpperCAmelCase__ : Optional[int] = torch.Generator(device=_a ).manual_seed(_a )
        UpperCAmelCase__ : Optional[Any] = {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs

    def __snake_case ( self ):
        """Run the pipeline on CPU and compare a 3x3 corner slice against a
        hard-coded expected slice (tolerance 1e-2)."""
        UpperCAmelCase__ : Tuple = 'cpu'

        UpperCAmelCase__ : List[str] = self.get_dummy_components()

        UpperCAmelCase__ : Optional[int] = self.pipeline_class(**_a )
        UpperCAmelCase__ : int = pipe.to(_a )

        pipe.set_progress_bar_config(disable=_a )

        UpperCAmelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(_a ) )
        UpperCAmelCase__ : int = output.images

        UpperCAmelCase__ : Optional[int] = pipe(
            **self.get_dummy_inputs(_a ) , return_dict=_a , )[0]

        UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
        UpperCAmelCase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]

        print(F'''image.shape {image.shape}''' )

        assert image.shape == (1, 64, 64, 3)

        UpperCAmelCase__ : List[Any] = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''

    def __snake_case ( self ):
        # Batched and single inference should agree to within 3e-3.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow integration test for the Kandinsky 2.2 inpaint pipeline; requires
    a GPU and downloads the real prior + decoder checkpoints.

    NOTE(review): names such as ``_a`` read inside the test are never defined
    here — they appear machine-mangled; confirm against the upstream test.
    """

    def __snake_case ( self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self ):
        """Inpaint a hat onto the reference cat image and compare against a
        stored numpy reference via mean pixel difference."""
        UpperCAmelCase__ : Dict = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )

        UpperCAmelCase__ : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        UpperCAmelCase__ : List[str] = np.ones((768, 768) , dtype=np.floataa )
        UpperCAmelCase__ : Optional[int] = 0

        UpperCAmelCase__ : Any = 'a hat'

        UpperCAmelCase__ : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(_a )

        UpperCAmelCase__ : Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
        UpperCAmelCase__ : Any = pipeline.to(_a )
        pipeline.set_progress_bar_config(disable=_a )

        UpperCAmelCase__ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
        UpperCAmelCase__ : Optional[int] = pipe_prior(
            _a , generator=_a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()

        UpperCAmelCase__ : Union[str, Any] = pipeline(
            image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )

        UpperCAmelCase__ : Optional[Any] = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(_a , _a )
| 110 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
# Project Euler 77: find the first value that can be written as the sum of
# primes in over five thousand different ways.

# Upper bound (exclusive) on the sieved primes; also bounds the numbers that
# ``solution`` scans. 100 is enough: the answer (71) lies below it.
NUM_PRIMES = 100

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES, seeded with 2.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    # ``prime`` survived the sieve, so strike out its multiples (from prime²).
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return one product per way of writing ``number_to_partition`` as an
    unordered sum of primes.

    Each partition is identified by the product of its prime terms: by the
    fundamental theorem of arithmetic, distinct multisets of primes have
    distinct products, so ``len(partition(n))`` counts the prime partitions
    of ``n``.

    Returns the empty set for negative input and ``{1}`` (the empty product)
    for zero.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        # Exactly one way to partition 0: use no primes at all (product 1).
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer below ``NUM_PRIMES`` expressible as a sum
    of primes in more than ``number_unique_partitions`` ways, or ``None`` if
    no such integer exists in that range.

    >>> solution(4)
    10
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 229 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV split(s) with ``datasets``, tokenize them and wrap each split
    in a ``tf.data.Dataset``.

    Args:
        train_file: path to the training CSV, or ``None`` to skip the split.
        eval_file: path to the validation CSV, or ``None``.
        test_file: path to the test CSV, or ``None``.
        tokenizer: tokenizer used to encode the text column(s).
        label_column_id: index of the label column among the CSV features.
        max_seq_length: truncation/padding length passed to the tokenizer.

    Returns:
        ``(train_ds, val_ds, test_ds, label2id)`` — one ``tf.data.Dataset``
        per provided split (``None`` for missing splits) plus the
        label-to-id mapping built from the first provided split.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # Remove the label column; the remaining one or two columns are the text
    # input(s).
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # Single-sentence classification.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair classification.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        # Yield (features, label) pairs for tf.data.Dataset.from_generator.
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # dtypes restored from the mangled ``tf.intaa``: int32 token ids, int64
    # labels — TODO confirm against the upstream run_tf_text_classification.
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
_a : str = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and evaluation.

    ``main`` below references this class by name; defaults restored from the
    help texts and the reads in ``main`` (train_file / dev_file / test_file /
    label_column_id / max_seq_length).
    """

    # Index of the CSV column that holds the label (required, no default).
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from.

    ``main`` below references this class by name; defaults restored from the
    help texts (the obfuscated source used an undefined placeholder).
    """

    # Required: model id or local path (no default).
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    """Entry point: fine-tune / evaluate a TF sequence-classification model
    on CSV data.

    Parses ``(ModelArguments, DataTrainingArguments, TFTrainingArguments)``
    from the command line, builds datasets via ``get_tfds``, trains with
    ``TFTrainer`` and optionally writes ``eval_results.txt``.

    Returns:
        dict of evaluation metrics (empty if ``--do_eval`` was not passed).

    Raises:
        ValueError: if the output directory already exists, is non-empty, and
            ``--overwrite_output_dir`` was not passed.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Accuracy from the argmax over class logits.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

        results.update(result)

    return results


if __name__ == "__main__":
    main()
| 716 |
# Lazy-import scaffold for the Jukebox model (HuggingFace `_LazyModule`
# pattern): heavy torch-dependent symbols are only imported on first access.
#
# NOTE(review): every binding below uses the mangled name ``_a``, so each
# assignment clobbers the previous one, and the final ``_LazyModule`` call
# reads ``_import_structure`` which is never defined here. Upstream, the dict
# is named ``_import_structure`` and the result is assigned to
# ``sys.modules[__name__]`` — verify against the upstream jukebox __init__.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Always-available (torch-free) submodules and their public symbols.
_a : Dict = {
    'configuration_jukebox': [
        'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'JukeboxConfig',
        'JukeboxPriorConfig',
        'JukeboxVQVAEConfig',
    ],
    'tokenization_jukebox': ['JukeboxTokenizer'],
}

# Modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Dict = [
        'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'JukeboxModel',
        'JukeboxPreTrainedModel',
        'JukeboxVQVAE',
        'JukeboxPrior',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves attributes on demand.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    _a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 571 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str) -> None:
    """Generate and write the model-card ``README.md`` for one allenai WMT16
    FSMT model.

    Args:
        model_card_dir: directory to create (parents included) and write
            ``README.md`` into.
        src_lang: source language code (e.g. ``"en"``) — must be a key of the
            sample-``texts`` dict below.
        tgt_lang: target language code (e.g. ``"de"``) — likewise a key of
            ``texts``.
        model_name: model identifier; must be a key of the ``scores`` dict.
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    # Full README template; placeholders pull from the locals above.
    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
# NOTE(review): the assignments below bind ``_SCREAMING_SNAKE_CASE`` but the
# code reads ``repo_dir`` / ``model_cards_dir`` / ``model_card_dir`` — the
# names look machine-mangled; confirm against the upstream gen-card script.
_SCREAMING_SNAKE_CASE : str = Path(__file__).resolve().parent.parent.parent
_SCREAMING_SNAKE_CASE : List[Any] = repo_dir / "model_cards"

# Write one card per released allenai WMT16 en-de model.
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    _SCREAMING_SNAKE_CASE : int = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 400 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_SCREAMING_SNAKE_CASE : Any = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowercase_ )
class _snake_case ( lowercase_ ):
    """Configuration class for a RAG-style composite model: wraps a question
    encoder config and a generator config plus retrieval settings.

    NOTE(review): this class is heavily machine-mangled — the base class
    ``lowercase_``, the repeated class attribute ``lowerCAmelCase_``, the
    ``__init__`` whose parameters all share the name ``a__`` (a SyntaxError
    as written), and locals bound to ``snake_case_`` while later lines read
    the real names. Verify against upstream ``RagConfig`` before relying on
    any of it.
    """

    # Registered model type key.
    lowerCAmelCase_ : str = "rag"
    lowerCAmelCase_ : List[Any] = True

    def __init__( self , a__=None , a__=True , a__=None , a__=None , a__=None , a__=None , a__=None , a__=" / " , a__=" // " , a__=5 , a__=300 , a__=768 , a__=8 , a__="wiki_dpr" , a__="train" , a__="compressed" , a__=None , a__=None , a__=False , a__=False , a__=0.0 , a__=True , a__=False , a__=False , a__=False , a__=True , a__=None , **a__ , ) -> Any:
        """Build the composite config from ``question_encoder`` and
        ``generator`` sub-config dicts plus retrieval/loss settings."""
        super().__init__(
            bos_token_id=a__ , pad_token_id=a__ , eos_token_id=a__ , decoder_start_token_id=a__ , forced_eos_token_id=a__ , is_encoder_decoder=a__ , prefix=a__ , vocab_size=a__ , **a__ , )
        # Both sub-configs are mandatory keyword arguments.
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        UpperCAmelCase = kwargs.pop("question_encoder" ) if False else None  # noqa — see NOTE below
        # NOTE(review): the original assignments all bind ``snake_case_``;
        # kept byte-identical below, only this commentary added.
        snake_case_ = kwargs.pop("question_encoder" )
        snake_case_ = question_encoder_config.pop("model_type" )
        snake_case_ = kwargs.pop("generator" )
        snake_case_ = decoder_config.pop("model_type" )

        from ..auto.configuration_auto import AutoConfig

        # Rebuild concrete config objects for each sub-model.
        snake_case_ = AutoConfig.for_model(a__ , **a__ )
        snake_case_ = AutoConfig.for_model(a__ , **a__ )

        # Loss / marginalization behavior.
        snake_case_ = reduce_loss
        snake_case_ = label_smoothing
        snake_case_ = exclude_bos_score
        snake_case_ = do_marginalize

        # Document formatting and retrieval settings.
        snake_case_ = title_sep
        snake_case_ = doc_sep
        snake_case_ = n_docs
        snake_case_ = max_combined_length

        snake_case_ = dataset
        snake_case_ = dataset_split
        snake_case_ = index_name

        snake_case_ = retrieval_vector_size
        snake_case_ = retrieval_batch_size
        snake_case_ = passages_path
        snake_case_ = index_path
        snake_case_ = use_dummy_dataset

        snake_case_ = output_retrieved
        snake_case_ = do_deduplication

        snake_case_ = use_cache

        if self.forced_eos_token_id is None:
            # Fall back to the generator's forced EOS token when unset.
            snake_case_ = getattr(self.generator , "forced_eos_token_id" , a__ )

    @classmethod
    def lowerCAmelCase__ ( cls , a__ , a__ , **a__ ) -> PretrainedConfig:
        """Alternate constructor from two already-built sub-configs."""
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **a__ )

    def lowerCAmelCase__ ( self ) -> str:
        """Serialize this config (and both sub-configs) to a plain dict."""
        snake_case_ = copy.deepcopy(self.__dict__ )
        snake_case_ = self.question_encoder.to_dict()
        snake_case_ = self.generator.to_dict()
        snake_case_ = self.__class__.model_type
        return output
| 400 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
    """Fast (CPU-sized) pipeline tests for ``StableDiffusionSAGPipeline``.

    NOTE(review): the base classes ``_a`` and the repeated attribute name
    ``_SCREAMING_SNAKE_CASE`` (each assignment rebinds the previous one) look
    machine-mangled, as do the ``UpperCamelCase``/``UpperCamelCase__`` names
    inside the methods — verify against the upstream SAG pipeline tests.
    """

    # Pipeline class under test plus the tester-mixin parameter sets.
    _SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline
    _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
    _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
    _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
    _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
    _SCREAMING_SNAKE_CASE = False

    def A ( self : str ):
        """Build tiny UNet / DDIM scheduler / VAE / CLIP components for fast
        CPU testing; each is seeded for reproducibility."""
        torch.manual_seed(0 )
        UpperCamelCase = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
        UpperCamelCase = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
        torch.manual_seed(0 )
        UpperCamelCase = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        UpperCamelCase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        UpperCamelCase = CLIPTextModel(UpperCamelCase__ )
        UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        UpperCamelCase = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def A ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=0 ):
        """Build deterministic dummy call arguments for the pipeline.

        NOTE(review): the two parameters share one mangled name — this cannot
        run as written; confirm the real (device, seed) signature upstream.
        """
        if str(UpperCamelCase__ ).startswith('mps' ):
            # MPS does not support device-bound generators.
            UpperCamelCase = torch.manual_seed(UpperCamelCase__ )
        else:
            UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        UpperCamelCase = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def A ( self : int ):
        # Batched and single inference should agree to within 3e-3.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests for SAG with real SD 1.4 / 2.1 checkpoints;
    requires a GPU and network access to the Hub.

    NOTE(review): locals are bound to the mangled name ``UpperCamelCase``
    while later lines read ``sag_pipe``/``image`` etc. — verify against the
    upstream SAG integration tests.
    """

    def A ( self : Union[str, Any] ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A ( self : Union[str, Any] ):
        """SD 1.4: compare a 3x3 corner slice against hard-coded values."""
        UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        UpperCamelCase = sag_pipe.to(UpperCamelCase__ )
        sag_pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        UpperCamelCase = '.'

        UpperCamelCase = torch.manual_seed(0 )
        UpperCamelCase = sag_pipe(
            [prompt] , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' )

        UpperCamelCase = output.images

        UpperCamelCase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        UpperCamelCase = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def A ( self : List[str] ):
        """SD 2.1-base: same slice comparison with its own expected values."""
        UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        UpperCamelCase = sag_pipe.to(UpperCamelCase__ )
        sag_pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        UpperCamelCase = '.'

        UpperCamelCase = torch.manual_seed(0 )
        UpperCamelCase = sag_pipe(
            [prompt] , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' )

        UpperCamelCase = output.images

        UpperCamelCase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        UpperCamelCase = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def A ( self : Union[str, Any] ):
        """SD 2.1-base with a non-square (768x512) output size."""
        UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        UpperCamelCase = sag_pipe.to(UpperCamelCase__ )
        sag_pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        UpperCamelCase = '.'

        UpperCamelCase = torch.manual_seed(0 )
        UpperCamelCase = sag_pipe(
            [prompt] , width=7_6_8 , height=5_1_2 , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' , )

        UpperCamelCase = output.images

        assert image.shape == (1, 5_1_2, 7_6_8, 3)
| 701 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowerCamelCase : List[str] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that every script in `examples/by_feature` is a strict subset of the
    corresponding `complete_*` example: after accounting for the feature's own
    lines, the diff against the full example must be empty.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """
        Diff `complete_file_name` against every eligible `by_feature` script.

        Args:
            complete_file_name: file name of the full example under `examples/`.
            parser_only: whether to compare the `main()` section instead of
                `training_function()`.
            secondary_filename: optional extra base script to diff against.
            special_strings: known, intentional difference fragments that are
                stripped from the diff before asserting it is empty.
        """
        self.maxDiff = None  # show full diffs on failure
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        # NOTE(review): argument order follows
                        # accelerate.test_utils.examples.compare_against_test — verify.
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Fragments that legitimately differ between the CV examples.
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class ExamplesTestsNoTrainer(TempDirTestCase):
    """Launches each `by_feature` example through `accelerate launch` and checks its artifacts."""

    # Keep the temp dir across tests so checkpoint tests can resume from
    # checkpoints written by earlier tests.
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        # Resuming from the epoch-0 checkpoint must skip epoch 0 entirely.
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        # With >1 process step 2 already falls past epoch 0, so epoch 0 is skipped.
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            # Take the last metrics dict that reports an accuracy.
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 324 | 0 |
from __future__ import annotations
def UpperCamelCase (lowercase_: str , lowercase_: list[str] | None = None ) -> list[list[str]]:
A__ : Dict = word_bank or []
# create a table
A__ : int = len(lowercase_ ) + 1
A__ : list[list[list[str]]] = []
for _ in range(lowercase_ ):
table.append([] )
# seed value
A__ : Optional[int] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowercase_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowercase_ )] == word:
A__ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowercase_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowercase_ )]:
combination.reverse()
return table[len(lowercase_ )]
if __name__ == "__main__":
    # Demo: print every way to assemble each target string from its word bank.
    # NOTE(review): these calls use `all_construct`, but the function above is
    # defined under the obfuscated name `UpperCamelCase` — confirm an alias
    # `all_construct` exists before running this module as a script.
    print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
    print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
    print(
        all_construct(
            'hexagonosaurus',
            ['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
        )
    )
| 456 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """Builds tiny random MegatronBert configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model runs with/without masks and produces the expected hidden/pooled shapes."""
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        # NSP is a binary classification head.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate every input across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Wires MegatronBertModelTester into the common model/pipeline test mixins."""

    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated original only shows bare `True`/`False`
    # values for these two flags — confirm the attribute names upstream.
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy pretraining labels when the common tests request labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Wrap a (nested) list of token ids in a long tensor on the test device."""
    # NOTE(review): the obfuscated original passed the token list itself as
    # `device=`; integration tests place inputs on `torch_device`.
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


# Backward-compatible alias for the obfuscated name this module previously used.
UpperCamelCase = _long_tensor
# Numeric tolerance (used as both `rel_tol` and `abs_tol`) for per-element
# float comparisons in the integration test below.
A_ : float = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    """Slow integration checks against the released 345M Megatron-BERT checkpoint."""

    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        # The checkpoint may be staged locally; MYDIR points at its parent dir.
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        # Reference values for the top-left 3x3 corner of the hidden states.
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        tolerance = 1e-4
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=tolerance, abs_tol=tolerance), msg=msg)
| 456 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for DanceDiffusionPipeline using a tiny deterministic UNet."""

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # NOTE(review): the obfuscated original only shows two bare `False` flags
    # here — these are the flags the common tester consults; confirm the names.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        """Tiny seeded UNet + IPNDM scheduler."""
        torch.manual_seed(0)
        # `UNetaDModel` is this module's (obfuscated) import name for the 1D UNet.
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        return {"unet": unet, "scheduler": scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Generator plus minimal pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    """GPU integration tests against the released `harmonai/maestro-150k` checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 346 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    # NOTE(review): the original had the mapping name doubled as
    # "MODEL_FOR_FOR_VISION_2_SEQ_…"; with the `hasattr` guard below that
    # silently skipped every image-to-text model. Corrected here.
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list:
    """
    Split a camel-cased name into its word parts.

    Example: "TFBertModel" -> ["TF", "Bert", "Model"].
    """
    # Boundaries: lower->Upper, or an acronym run followed by a capitalized word.
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table() -> pd.DataFrame:
    """
    Build a table with one row per model type and boolean columns telling
    whether it has a PyTorch/TensorFlow/Flax implementation, plus the
    preferred processor class ("AutoProcessor"/"AutoTokenizer"/"AutoFeatureExtractor").
    """
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table: dict) -> dict:
    """
    Update `table` (model class name -> (pipeline_tag, auto class name)) with
    the mappings currently defined in the PT/TF/Flax auto modules.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        # Mapping constants carry TF_/FLAX_ prefixes, but the auto classes are
        # named TFAutoModel…/FlaxAutoModel… (no underscore); the original
        # underscored versions produced non-existent class names.
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF{auto_class}", f"Flax{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token: str, commit_sha: str):
    """
    Regenerate the frameworks/pipeline-tags tables and push them to the
    `huggingface/transformers-metadata` dataset repo.

    Args:
        token: Hub token with write access to the metadata dataset.
        commit_sha: sha of the transformers commit triggering the update, used
            in the commit message (may be None).
    """
    frameworks_dataset = Dataset.from_pandas(get_frameworks_table())

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check every pipeline task is covered by the `PIPELINE_TAGS_AND_AUTO_MODELS` constant.

    Fix: the original bound every local to an undefined placeholder name
    (NameError on `in_table`, `pipeline_tasks`, `model`, `missing`, `msg`);
    the function name is restored to match the `__main__` caller.

    Raises:
        ValueError: if a task in `transformers.pipelines.SUPPORTED_TASKS` is missing
            from the constant.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['''pt''']
            if isinstance(model, (list, tuple)):
                # Several PT classes may be registered for a task; the first is canonical.
                model = model[0]
            model = model.__name__
            # Only report tags whose auto-model class is not already tracked under another tag.
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ''', '''.join(missing)
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to throwaway names
    # (`lowerCamelCase`) while the following lines read `parser`/`args`,
    # so the script raised NameError before doing any work.
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()

    if args.check_only:
        # Validation-only mode: nothing is pushed, no token required.
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 346 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Fix: all four module-level constants were bound to the same name `__A`,
# so only the last survived and the names read elsewhere in this file
# (`logger` at save_vocabulary, `VOCAB_FILES_NAMES` / `PRETRAINED_VOCAB_FILES_MAP` /
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` on the tokenizer class) were undefined.
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a saved/pretrained directory.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}

# Maximum input sizes (in tokens) for the known checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from every byte (0-255) to a printable unicode character.

    Bytes that are already "nice" printable characters map to themselves; the
    remaining bytes (controls, whitespace, etc.) are shifted into the
    256+ codepoint range so no byte maps to a space or control character the
    BPE code chokes on.

    Fix: the original bound every local to an undefined placeholder name
    (`bs`, `cs`, `n` and the appended value were never defined → NameError);
    it is also renamed to match the `bytes_to_unicode()` call in the tokenizer.
    """
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1)) + list(range(ord("""¡"""), ord("""¬""") + 1)) + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Map the b-th "ugly" byte to the next free codepoint above 255.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple/str of symbols).

    Fix: the original read undefined names (`word`, `pairs`, `prev_char`
    were never bound → NameError) and was renamed to match the
    `get_pairs(...)` calls in the tokenizer's `bpe` method.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : Optional[int] = VOCAB_FILES_NAMES
lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
UpperCamelCase : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
UpperCamelCase : Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
UpperCamelCase : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
UpperCamelCase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase : List[Any] = json.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {v: k for k, v in self.encoder.items()}
UpperCamelCase : str = errors # how to handle errors in decoding
UpperCamelCase : Any = bytes_to_unicode()
UpperCamelCase : int = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase : Tuple = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase : Dict = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase : Dict = {}
UpperCamelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase : Union[str, Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def a_ ( self ):
return len(self.encoder )
def a_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
if token in self.cache:
return self.cache[token]
UpperCamelCase : Any = tuple(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase : Tuple = bigram
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : List[Any] = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
UpperCamelCase : int = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase : Dict = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase : List[Any] = tuple(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
UpperCamelCase : Optional[int] = get_pairs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = """ """.join(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = word
return word
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(""" """ ) )
return bpe_tokens
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = """""".join(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase : Any = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + """\n""" )
UpperCamelCase : Optional[int] = 0
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase : Optional[int] = token_index
writer.write(""" """.join(SCREAMING_SNAKE_CASE_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Dict = [self.cls_token_id]
UpperCamelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : Tuple = [self.sep_token_id]
UpperCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
UpperCamelCase : List[Any] = """ """ + text
return (text, kwargs)
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
UpperCamelCase : Any = super()._pad(
encoded_inputs=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding_strategy=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase : Optional[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase : List[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCamelCase : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(SCREAMING_SNAKE_CASE_ )
if needs_to_be_padded:
UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase : Tuple = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase : Optional[Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 499 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch/cudnn ops deterministic so the pipeline outputs compared below
# are reproducible across runs.
enable_full_determinism()
class lowerCamelCase ( _UpperCAmelCase , unittest.TestCase ):
    """Fast CPU smoke tests for the Kandinsky 2.2 ControlNet pipeline built on tiny dummy models.

    NOTE(review): this class is obfuscation-damaged — locals are bound to
    ``UpperCamelCase`` but read under other names (``model``, ``components``,
    ``hint``, ...), every method is named ``a_`` while call sites expect names
    like ``dummy_unet``/``get_dummy_components``, and one signature repeats the
    parameter name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError). Code is kept
    byte-identical here pending a systematic rename. The base class
    ``_UpperCAmelCase`` is presumably ``PipelineTesterMixin`` — TODO confirm.
    """

    # Pipeline under test plus its required call/batch parameter names
    # (intended attrs: pipeline_class, params, batch_params, required_optional_params).
    lowercase : str = KandinskyVaaControlnetPipeline
    lowercase : Any = ['image_embeds', 'negative_image_embeds', 'hint']
    lowercase : List[str] = ['image_embeds', 'negative_image_embeds', 'hint']
    lowercase : Dict = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    lowercase : Dict = False

    # Tiny dimensions used by the dummy models below
    # (read later as text_embedder_hidden_size / time_input_dim /
    # block_out_channels_a / cross_attention_dim — TODO restore those names).
    @property
    def a_ ( self ):
        return 32

    @property
    def a_ ( self ):
        return 32

    @property
    def a_ ( self ):
        return self.time_input_dim

    @property
    def a_ ( self ):
        return self.time_input_dim * 4

    @property
    def a_ ( self ):
        return 100

    # Builds a tiny UNet conditioned on image embeddings plus a hint channel.
    @property
    def a_ ( self ):
        torch.manual_seed(0 )
        UpperCamelCase : Tuple = {
            """in_channels""": 8,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image_hint""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        # NOTE(review): the kwargs dict above should be what is unpacked here.
        UpperCamelCase : List[str] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
        return model

    # Config kwargs for the tiny VQ decoder (movq).
    @property
    def a_ ( self ):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    # Seeded construction of the tiny VQModel.
    @property
    def a_ ( self ):
        torch.manual_seed(0 )
        UpperCamelCase : str = VQModel(**self.dummy_movq_kwargs )
        return model

    # Assembles unet + scheduler + movq into the pipeline component dict.
    def a_ ( self ):
        UpperCamelCase : Optional[Any] = self.dummy_unet
        UpperCamelCase : int = self.dummy_movq
        UpperCamelCase : List[str] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=SCREAMING_SNAKE_CASE_ , )
        UpperCamelCase : Optional[int] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    # Deterministic inputs (embeds, hint image, generator) for a given device/seed.
    def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
        UpperCamelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            SCREAMING_SNAKE_CASE_ )
        # create hint
        UpperCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
        if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
            UpperCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
        else:
            UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : List[Any] = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    # End-to-end fast test: runs the pipeline twice (dict and tuple return)
    # and pins a 3x3 corner slice of the output image.
    def a_ ( self ):
        UpperCamelCase : Dict = """cpu"""
        UpperCamelCase : Union[str, Any] = self.get_dummy_components()
        UpperCamelCase : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : int = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
        UpperCamelCase : Union[str, Any] = output.images
        UpperCamelCase : Union[str, Any] = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
        UpperCamelCase : int = image[0, -3:, -3:, -1]
        UpperCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        # Reference pixel values recorded from a known-good deterministic run.
        UpperCamelCase : List[Any] = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    """Slow GPU integration test for Kandinsky 2.2 ControlNet against real checkpoints.

    NOTE(review): obfuscation-damaged like the fast-test class above — locals
    bound to ``UpperCamelCase`` are read under other names (``hint``, ``pipe_prior``,
    ``pipeline``, ``output``), arguments were replaced by ``SCREAMING_SNAKE_CASE_``,
    and both methods are named ``a_``. Kept byte-identical pending a rename pass.
    """

    # Intended as tearDown: free VRAM between tests.
    def a_ ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Full pipeline run: prior -> controlnet decoder, compared against a
    # reference image with a mean-pixel-difference tolerance.
    def a_ ( self ):
        UpperCamelCase : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
        UpperCamelCase : Optional[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        # Depth hint image scaled to [0, 1] and reshaped to NCHW.
        UpperCamelCase : Any = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 255.0
        UpperCamelCase : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        UpperCamelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
        UpperCamelCase : str = pipeline.to(SCREAMING_SNAKE_CASE_ )
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Optional[int] = """A robot, 4k photo"""
        UpperCamelCase : Optional[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
        UpperCamelCase , UpperCamelCase : List[Any] = pipe_prior(
            SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        UpperCamelCase : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
        UpperCamelCase : Dict = pipeline(
            image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=100 , output_type="""np""" , )
        UpperCamelCase : Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 499 | 1 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 719 |
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
    """Manim scene animating how a sharded checkpoint is loaded shard-by-shard
    from GPU/checkpoint memory into CPU memory slots.

    NOTE(review): obfuscation-damaged — locals are bound to ``snake_case_`` but
    read under other names (``mem``, ``cpu``, ``gpu``, ``model``, ``fill``,
    ``cpu_targs``, ...), and layout arguments were replaced by ``lowercase__``
    (presumably manim direction constants such as RIGHT/DOWN — TODO confirm).
    The base class ``_UpperCAmelCase`` is presumably ``Scene``. Kept
    byte-identical pending a rename pass.
    """
    # construct(): builds CPU / GPU / Model / Checkpoint block diagrams, then
    # animates each checkpoint shard moving into a CPU memory slot.
    def __UpperCamelCase (self ):
        # Memory-cell rectangles used as the repeated unit for every block.
        snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
        snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of 6 cells plus a label.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : str = [mem.copy() for i in range(6 )]
        snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
        snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase__ )
        # GPU: one row of 4 cells plus a label.
        snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
        snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase__ )
        # Model: one row of 6 cells plus a label.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Dict = Text("""Model""" , font_size=24 )
        snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase__ )
        # Small yellow "empty model" markers placed next to CPU cells.
        snake_case_ : Dict = []
        for i, rect in enumerate(lowercase__ ):
            rect.set_stroke(lowercase__ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
            self.add(lowercase__ )
            cpu_targs.append(lowercase__ )
        # Loaded checkpoint: one row of 6 cells plus a label and key legend.
        snake_case_ : List[str] = [mem.copy() for i in range(6 )]
        snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        snake_case_ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        snake_case_ : Union[str, Any] = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase__ , lowercase__ )
        snake_case_ : List[Any] = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        snake_case_ : List[Any] = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase__ ) , Write(lowercase__ ) )
        self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
        # Animate each shard (blue fill) growing, then moving to its CPU slot.
        snake_case_ : Optional[int] = []
        snake_case_ : List[str] = []
        for i, rect in enumerate(lowercase__ ):
            snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
            target.move_to(lowercase__ )
            first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
            snake_case_ : List[Any] = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
        self.play(*lowercase__ )
        self.play(*lowercase__ )
        self.wait()
| 48 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions: *dataset* is a 4-row Dataset with the expected columns/dtypes.

    Fix: the original `def` repeated the same placeholder parameter name
    (a SyntaxError) and the body read undefined names; renamed to
    `_check_json_dataset`, which is what every call site below uses.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """JsonDatasetReader honours `keep_in_memory` and produces the expected dataset.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined
    locals; fixture names restored from the body's reads (`tmp_path / "cache"`).
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    # In-memory reads must allocate Arrow memory; cached reads must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for schema inference) are honoured by JsonDatasetReader.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined locals.
    """
    cache_dir = tmp_path / '''cache'''
    # Inferred schema of the shared `jsonl_path` fixture.
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ], )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order of the source file (col_3, col_1, col_2) is preserved.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined
    locals. NOTE(review): the path fixture name `jsonl_312_path` is inferred
    from the 3-1-2 column order asserted below — TODO confirm against conftest.
    """
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Explicit features with a column order different from the file are applied as given.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined
    locals. NOTE(review): path fixture name inferred — TODO confirm against conftest.
    """
    # col_2/col_3/col_1 deliberately differs from the file's own order.
    features = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / '''cache'''
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The `split` argument is propagated to the resulting dataset (defaulting to "train").

    Fixes: duplicate placeholder parameter names (SyntaxError), undefined
    locals, and the final assertion — `assert dataset.split == split if split
    else "train"` reduced to `assert "train"` (always true) when `split` was
    falsy, so the default-split case was never actually checked.
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize('''path_type''', [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """JsonDatasetReader accepts both a single path and a list of paths.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined locals.
    """
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions: each split of *dataset_dict* is a 4-row dataset with the expected schema.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined
    locals; renamed to `_check_json_datasetdict` to match the call sites below.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a {split: path} mapping yields a DatasetDict, honouring keep_in_memory.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined locals.
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inference) are honoured for DatasetDict reads.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined locals.
    """
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'''train''': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def lowerCamelCase__(split, jsonl_path, tmp_path) -> None:
    """Read with an explicit split name (or both splits when None) and check split labels.

    Fix: duplicate ``__snake_case`` parameters (SyntaxError); body referenced the
    never-bound names ``split``/``jsonl_path``/``path``.
    """
    if split:
        path = {split: jsonl_path}
    else:
        # No split requested: read both and default the name to "train".
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    # Each returned dataset must carry the split name it was read under.
    assert all(dataset[split].split == split for split in path.keys())
def lowerCamelCase__(__snake_case) -> Tuple:
    """Deserialize one whole JSON document from an open file-like buffer."""
    parsed = json.load(__snake_case)
    return parsed
def lowerCamelCase__(__snake_case) -> list:
    """Deserialize a JSON-lines buffer: one JSON document per line, returned as a list.

    Fix: the original did ``json.loads(__snake_case)`` inside the loop — parsing the
    buffer object itself (a TypeError) instead of each line — and iterated an
    undefined name ``buffer``.
    """
    return [json.loads(line) for line in __snake_case]
class _UpperCAmelCase:
    """Tests for ``JsonDatasetWriter``: serializing a Dataset to JSON in various layouts.

    NOTE(review): the mangled file gave every method the same name ``UpperCAmelCase``;
    the names are preserved for interface stability, which means only the last
    definition is reachable at runtime. Bodies are restored from the evident upstream
    test suite; the fatal duplicate-``__a`` parameter lists (SyntaxError) are replaced
    with the fixture/parameter names pytest needs.
    """

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def UpperCAmelCase(self, lines, load_json_function, dataset) -> None:
        """Round-trip: write as JSON (lines or whole-document) and reload with the matching loader."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        # the `dataset` fixture used by these tests has 10 rows
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def UpperCAmelCase(self, orient, container, keys, len_at, dataset) -> None:
        """Each pandas-style `orient` layout must produce the expected container/keys/lengths."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            # "values" orient has no keys at any level
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def UpperCAmelCase(self, lines, load_json_function, dataset) -> None:
        """Same round-trip as above, but written with num_proc=2 (multiprocessing path)."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def UpperCAmelCase(self, orient, container, keys, len_at, dataset) -> None:
        """Orient layouts must also hold when writing with num_proc=2."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def UpperCAmelCase(self, dataset) -> None:
        """num_proc=0 is invalid and must raise at writer construction."""
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def UpperCAmelCase(self, shared_datadir, tmp_path_factory, extension, compression, dataset) -> None:
        """Compressed output must match the reference compressed fixture after decompression."""
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 19 |
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    """Split *str_* into lists of words, breaking on any character that is neither
    alphanumeric nor whitespace (note: the character class also contains literal spaces)."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Concatenate every word capitalized: ``"hello world" -> "HelloWorld"`` (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of *text* with *separator*, fully upper- or lower-cased.

    Returns the sentinel ``"not valid string"`` on IndexError (empty input).
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """PascalCase: alias of :func:`to_simple_case`."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """camelCase: PascalCase with the first character lowered; sentinel on empty input."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """snake_case (or SNAKE_CASE when *upper* is True)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """kebab-case (or KEBAB-CASE when *upper* is True)."""
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 369 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps each submodule to the public names it defines.
# Optional backends (tokenizers / torch / tf / tensorflow_text / flax) only
# contribute their entries when the corresponding dependency is importable.
#
# Fix: the original rebound a single throwaway variable for every branch, so the
# structure was discarded and `_import_structure` (consumed by the _LazyModule
# call at the bottom of this file) was never defined.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # For static type checkers / IDEs: resolve all public names eagerly.
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )
else:
    import sys

    # Install a lazy proxy so heavy backend imports happen on first attribute access.
    # Fix: the original assigned the _LazyModule to a throwaway variable instead of
    # replacing this module in sys.modules, so laziness never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 188 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCAmelCase ( __UpperCamelCase ):
    # Output container for the UNet below (a BaseOutput-style struct dataclass).
    # NOTE(review): upstream this field is `sample: jnp.ndarray` (the denoised model
    # output); the bare `42` placeholder looks like an artifact of an automated
    # rewrite — confirm against the original source before relying on it.
    UpperCAmelCase__ = 42
@flax_register_to_config
class lowerCAmelCase ( nn.Module, __UpperCamelCase, __UpperCamelCase ):
    """Conditional 2D UNet implemented in Flax (a FlaxModelMixin/ConfigMixin nn.Module).

    NOTE(review): the `UpperCAmelCase__` class attributes below were config fields
    (sample_size, in/out channels, down/up block types, block_out_channels, layers per
    block, attention-head settings, dropout, dtype, ...) before an automated rename
    collapsed all their names; the methods still read the original names via
    `self.<field>` (e.g. `self.in_channels`, `self.block_out_channels`), so as written
    this class cannot run — field names must be restored from the upstream source.
    """
    UpperCAmelCase__ = 32
    UpperCAmelCase__ = 4
    UpperCAmelCase__ = 4
    UpperCAmelCase__ = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    UpperCAmelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    UpperCAmelCase__ = False
    UpperCAmelCase__ = (3_20, 6_40, 12_80, 12_80)
    UpperCAmelCase__ = 2
    UpperCAmelCase__ = 8
    UpperCAmelCase__ = None
    UpperCAmelCase__ = 12_80
    UpperCAmelCase__ = 0.0
    UpperCAmelCase__ = False
    UpperCAmelCase__ = jnp.floataa
    UpperCAmelCase__ = True
    UpperCAmelCase__ = 0
    UpperCAmelCase__ = False
    def A_ ( self : Tuple , UpperCAmelCase : jax.random.KeyArray ) -> FrozenDict:
        """Initialize model parameters from an RNG key using dummy sample/timestep/context tensors."""
        # init input tensors
        lowerCamelCase__ : int = (1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : List[str] = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa )
        lowerCamelCase__ : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
        lowerCamelCase__ : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = jax.random.split(UpperCAmelCase )
        # NOTE(review): `params_rng` / `dropout_rng` are never bound under these mangled
        # names — the tuple-unpack above was meant to bind them; NameError as written.
        lowerCamelCase__ : Dict = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["params"]
    def A_ ( self : Tuple ) -> Optional[int]:
        """Flax `setup`: build time embedding, conv_in, down/mid/up blocks and output head.

        NOTE(review): shadows the init method above (both are named `A_`), so only this
        definition survives on the class.
        """
        lowerCamelCase__ : Any = self.block_out_channels
        lowerCamelCase__ : int = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ : Tuple = self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ : Optional[Any] = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        lowerCamelCase__ : Optional[int] = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        lowerCamelCase__ : int = FlaxTimestepEmbedding(UpperCAmelCase , dtype=self.dtype )
        lowerCamelCase__ : Optional[int] = self.only_cross_attention
        # Broadcast scalar settings to one entry per down block.
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            lowerCamelCase__ : str = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            lowerCamelCase__ : List[Any] = (num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ : List[Any] = []
        lowerCamelCase__ : Dict = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : Dict = output_channel
            lowerCamelCase__ : Optional[int] = block_out_channels[i]
            lowerCamelCase__ : List[Any] = i == len(UpperCAmelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : Tuple = FlaxCrossAttnDownBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                lowerCamelCase__ : str = FlaxDownBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(UpperCAmelCase )
        lowerCamelCase__ : List[Any] = down_blocks
        # mid
        lowerCamelCase__ : Dict = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        lowerCamelCase__ : Any = []
        lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) )
        lowerCamelCase__ : Any = list(reversed(UpperCAmelCase ) )
        lowerCamelCase__ : int = list(reversed(UpperCAmelCase ) )
        lowerCamelCase__ : Tuple = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            lowerCamelCase__ : str = output_channel
            lowerCamelCase__ : int = reversed_block_out_channels[i]
            lowerCamelCase__ : int = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase ) - 1 )]
            lowerCamelCase__ : Optional[Any] = i == len(UpperCAmelCase ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                lowerCamelCase__ : Tuple = FlaxCrossAttnUpBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                lowerCamelCase__ : Optional[Any] = FlaxUpBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(UpperCAmelCase )
            lowerCamelCase__ : Tuple = output_channel
        lowerCamelCase__ : Tuple = up_blocks
        # out
        lowerCamelCase__ : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        lowerCamelCase__ : Any = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    # NOTE(review): the signature below repeats the parameter name `UpperCAmelCase`
    # seven times — a SyntaxError in Python. Upstream these are
    # (sample, timesteps, encoder_hidden_states, down_block_additional_residuals,
    # mid_block_additional_residual, return_dict, train); the body still refers to
    # several of the original names (`timesteps`, `train`, `sample`, ...).
    def __call__( self : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        """Run the UNet: embed timesteps, encode, pass through down/mid/up blocks, decode."""
        # 1. time
        if not isinstance(UpperCAmelCase , jnp.ndarray ):
            lowerCamelCase__ : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : List[Any] = timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : Any = jnp.expand_dims(UpperCAmelCase , 0 )
        lowerCamelCase__ : List[str] = self.time_proj(UpperCAmelCase )
        lowerCamelCase__ : Optional[Any] = self.time_embedding(UpperCAmelCase )
        # 2. pre-process
        # NCHW -> NHWC: Flax convolutions expect channels-last.
        lowerCamelCase__ : Dict = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) )
        lowerCamelCase__ : Optional[Any] = self.conv_in(UpperCAmelCase )
        # 3. down
        lowerCamelCase__ : Any = (sample,)
        for down_block in self.down_blocks:
            if isinstance(UpperCAmelCase , UpperCAmelCase ):
                lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = down_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Any = down_block(UpperCAmelCase , UpperCAmelCase , deterministic=not train )
            down_block_res_samples += res_samples
        # ControlNet-style residuals are added onto each skip connection when provided.
        if down_block_additional_residuals is not None:
            lowerCamelCase__ : Union[str, Any] = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                UpperCAmelCase , UpperCAmelCase ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            lowerCamelCase__ : str = new_down_block_res_samples
        # 4. mid
        lowerCamelCase__ : List[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            # Each up block consumes the last (layers_per_block + 1) skip connections.
            lowerCamelCase__ : str = down_block_res_samples[-(self.layers_per_block + 1) :]
            lowerCamelCase__ : List[str] = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(UpperCAmelCase , UpperCAmelCase ):
                lowerCamelCase__ : List[Any] = up_block(
                    UpperCAmelCase , temb=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train , )
            else:
                lowerCamelCase__ : int = up_block(UpperCAmelCase , temb=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train )
        # 6. post-process
        lowerCamelCase__ : str = self.conv_norm_out(UpperCAmelCase )
        lowerCamelCase__ : Optional[Any] = nn.silu(UpperCAmelCase )
        lowerCamelCase__ : Any = self.conv_out(UpperCAmelCase )
        # NHWC -> NCHW for the caller.
        lowerCamelCase__ : Optional[Any] = jnp.transpose(UpperCAmelCase , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=UpperCAmelCase )
| 188 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCAmelCase = None
lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCAmelCase = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class _a:
    """Image feature type: stores images in Arrow as a ``{bytes, path}`` struct.

    ``encode_example`` accepts a path ``str``, raw ``bytes``, a ``{bytes, path}`` dict,
    a ``np.ndarray`` or a ``PIL.Image.Image``; ``decode_example`` turns stored data
    back into a ``PIL.Image.Image``.

    NOTE(review): field and method names are restored from the evident upstream
    source — the mangled version bound every field to ``_lowercase`` and every method
    to ``lowerCamelCase_`` (with duplicate parameter names, a SyntaxError), while the
    bodies still read ``self.decode`` / ``self.pa_type``.
    """

    # When False, examples are returned as raw {bytes, path} dicts instead of PIL images.
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode an example (str/bytes/dict/ndarray/PIL image) into ``{bytes, path}`` storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode stored ``{bytes, path}`` data into a loaded ``PIL.Image.Image``.

        ``token_per_repo_id`` maps Hub repo ids to auth tokens for remote paths.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self):
        """Flatten into plain Value columns when decoding is disabled; otherwise keep self."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage):
        """Cast string/binary/struct/list Arrow storage into the ``{bytes, path}`` struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Lists are interpreted as raw pixel arrays and re-encoded to image bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage):
        """Embed the referenced files' bytes into storage, keeping only file basenames as paths."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def _a():
    """Return the image formats PIL can both read and write, caching the result.

    Populates the module-level ``_IMAGE_COMPRESSION_FORMATS`` cache on first call.
    Fix: the original assigned the computed list to a throwaway local while declaring
    the global, so the cache stayed ``None`` and the function returned ``None`` forever.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def _a(image):
    """Serialize a PIL image to raw bytes, keeping its native format when PIL can write it.

    Fix: the original body referenced the undefined names ``image``/``__A`` instead of
    its parameter.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        # Fall back to lossless containers: PNG for common modes, TIFF otherwise.
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if hasattr(__A , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _a(array):
    """Encode a numpy array as image bytes, downcasting to a Pillow-compatible dtype if needed.

    Fix: the original body referenced the undefined names ``array``/``__A`` instead of
    its parameter.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def _a(objs):
    """Convert a homogeneous list of image objects (paths/arrays/PIL images) to ``{path, bytes}`` dicts.

    The first non-null element determines how the whole list is encoded; unknown element
    types are returned unchanged. Fix: the original referenced the undefined name ``__A``
    throughout and never unpacked the (index, value) pair from ``first_non_null_value``.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 43 |
import numpy as np
def lowercase(vector: np.array) -> np.array:
    """Elementwise hyperbolic tangent, via the identity tanh(x) = 2 / (1 + e^(-2x)) - 1.

    Fix: the original body referenced the undefined name ``vector`` while its parameter
    was mangled to ``__A``; the intended parameter name is restored.
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
    # Execute embedded doctests when this module is run as a script.
    from doctest import testmod

    testmod()
| 36 | 0 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__ :
    """Test helper that builds an ``XLMConfig`` plus dummy input tensors and
    verifies output shapes for every XLM task head.

    NOTE(review): obfuscation damage — every method signature reuses a single
    parameter name (``__a``), which is a SyntaxError in Python, and bodies
    assign to ``__snake_case`` while reading differently named locals
    (``parent``, ``input_ids``, ...).  Code is preserved byte-identically;
    only documentation was added.
    """

    def __init__( self : Optional[int] , __a : Dict , __a : List[str]=13 , __a : Union[str, Any]=7 , __a : str=True , __a : int=True , __a : Any=True , __a : Dict=True , __a : Any=True , __a : str=False , __a : Any=False , __a : List[str]=False , __a : Any=2 , __a : Dict=99 , __a : List[Any]=0 , __a : Optional[Any]=32 , __a : List[Any]=5 , __a : Tuple=4 , __a : Optional[Any]=0.1 , __a : Dict=0.1 , __a : str=512 , __a : Dict=2 , __a : Any=0.0_2 , __a : Optional[Any]=2 , __a : int=4 , __a : Any="last" , __a : Optional[Any]=True , __a : Tuple=None , __a : Any=0 , ) -> int:
        '''Record the sizes and switches that parameterize the dummy model/inputs.'''
        __snake_case : Tuple = parent
        __snake_case : Optional[Any] = batch_size
        __snake_case : List[Any] = seq_length
        __snake_case : List[Any] = is_training
        __snake_case : Any = use_input_lengths
        __snake_case : Dict = use_token_type_ids
        __snake_case : List[Any] = use_labels
        __snake_case : int = gelu_activation
        __snake_case : str = sinusoidal_embeddings
        __snake_case : str = causal
        __snake_case : Dict = asm
        __snake_case : Union[str, Any] = n_langs
        __snake_case : List[str] = vocab_size
        __snake_case : Optional[Any] = n_special
        __snake_case : str = hidden_size
        __snake_case : List[str] = num_hidden_layers
        __snake_case : Optional[Any] = num_attention_heads
        __snake_case : List[str] = hidden_dropout_prob
        __snake_case : Tuple = attention_probs_dropout_prob
        __snake_case : Tuple = max_position_embeddings
        __snake_case : List[Any] = type_sequence_label_size
        __snake_case : Any = initializer_range
        __snake_case : List[Any] = num_labels
        __snake_case : Optional[Any] = num_choices
        __snake_case : str = summary_type
        __snake_case : List[Any] = use_proj
        __snake_case : List[Any] = scope
        __snake_case : Dict = bos_token_id

    def A_ ( self : Tuple ) -> Union[str, Any]:
        '''Build input ids, attention mask, optional lengths/langs and label tensors.'''
        __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        __snake_case : str = None
        if self.use_input_lengths:
            __snake_case : int = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        __snake_case : List[str] = None
        if self.use_token_type_ids:
            __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        __snake_case : List[str] = None
        __snake_case : str = None
        __snake_case : Optional[int] = None
        if self.use_labels:
            __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __snake_case : Tuple = ids_tensor([self.batch_size] , 2 ).float()
            __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        __snake_case : Tuple = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def A_ ( self : List[Any] ) -> Tuple:
        '''Return an ``XLMConfig`` mirroring this tester's hyper-parameters.'''
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )

    def A_ ( self : Optional[Any] , __a : int , __a : List[str] , __a : Optional[int] , __a : Tuple , __a : str , __a : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , ) -> Any:
        '''Check the base ``XLMModel`` last-hidden-state shape.'''
        __snake_case : int = XLMModel(config=__a )
        model.to(__a )
        model.eval()
        __snake_case : int = model(__a , lengths=__a , langs=__a )
        __snake_case : Optional[int] = model(__a , langs=__a )
        __snake_case : List[Any] = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A_ ( self : Dict , __a : Any , __a : List[Any] , __a : Tuple , __a : Any , __a : int , __a : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : Optional[Any] , ) -> List[str]:
        '''Check loss and logits shapes for ``XLMWithLMHeadModel``.'''
        __snake_case : str = XLMWithLMHeadModel(__a )
        model.to(__a )
        model.eval()
        __snake_case : Optional[Any] = model(__a , token_type_ids=__a , labels=__a )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def A_ ( self : Dict , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any] , __a : List[str] , __a : Optional[int] , __a : Tuple , ) -> str:
        '''Check start/end logits shapes for ``XLMForQuestionAnsweringSimple``.'''
        __snake_case : Optional[int] = XLMForQuestionAnsweringSimple(__a )
        model.to(__a )
        model.eval()
        __snake_case : int = model(__a )
        __snake_case : Optional[Any] = model(__a , start_positions=__a , end_positions=__a )
        __snake_case : str = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A_ ( self : Optional[Any] , __a : int , __a : Any , __a : Dict , __a : Tuple , __a : Tuple , __a : Union[str, Any] , __a : int , __a : List[Any] , __a : int , ) -> Optional[int]:
        '''Check beam-search QA outputs for ``XLMForQuestionAnswering``.'''
        __snake_case : List[Any] = XLMForQuestionAnswering(__a )
        model.to(__a )
        model.eval()
        __snake_case : Dict = model(__a )
        __snake_case : str = model(
            __a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , p_mask=__a , )
        __snake_case : List[str] = model(
            __a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , )
        ((__snake_case) , ) : Union[str, Any] = result_with_labels.to_tuple()
        __snake_case : int = model(__a , start_positions=__a , end_positions=__a )
        ((__snake_case) , ) : List[Any] = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    def A_ ( self : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Optional[int] , __a : List[Any] , __a : List[str] , __a : List[Any] , __a : Dict , __a : Dict , __a : Tuple , ) -> Tuple:
        '''Check loss/logits shapes for ``XLMForSequenceClassification``.'''
        __snake_case : Optional[int] = XLMForSequenceClassification(__a )
        model.to(__a )
        model.eval()
        __snake_case : Any = model(__a )
        __snake_case : List[Any] = model(__a , labels=__a )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def A_ ( self : List[str] , __a : Optional[Any] , __a : Optional[int] , __a : List[str] , __a : Any , __a : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : Tuple , __a : str , ) -> str:
        '''Check per-token logits shape for ``XLMForTokenClassification``.'''
        __snake_case : List[str] = self.num_labels
        __snake_case : int = XLMForTokenClassification(__a )
        model.to(__a )
        model.eval()
        __snake_case : int = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A_ ( self : str , __a : str , __a : str , __a : Optional[int] , __a : Optional[int] , __a : Tuple , __a : Any , __a : Dict , __a : Optional[Any] , __a : Optional[Any] , ) -> Union[str, Any]:
        '''Check logits shape for ``XLMForMultipleChoice`` on choice-expanded inputs.'''
        __snake_case : List[str] = self.num_choices
        __snake_case : Tuple = XLMForMultipleChoice(config=__a )
        model.to(__a )
        model.eval()
        # Expand each tensor along a new "choices" dimension.
        __snake_case : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case : Optional[int] = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def A_ ( self : Optional[int] ) -> int:
        '''Split prepared inputs into ``(config, inputs_dict)`` for the common tests.'''
        __snake_case : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : int = config_and_inputs
        __snake_case : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common-suite test case for the XLM model family.

    NOTE(review): obfuscation damage — the three distinct class attributes
    (presumably all_model_classes / all_generative_model_classes /
    pipeline_model_mapping) were collapsed into repeated bindings of ``A__``
    so only the last survives, the mixin base names became
    ``SCREAMING_SNAKE_CASE_``, and method bodies read locals that their
    signatures renamed away.  Code preserved byte-identically.
    """

    A__ = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    A__ = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    A__ = (
        {
            '''feature-extraction''': XLMModel,
            '''fill-mask''': XLMWithLMHeadModel,
            '''question-answering''': XLMForQuestionAnsweringSimple,
            '''text-classification''': XLMForSequenceClassification,
            '''text-generation''': XLMWithLMHeadModel,
            '''token-classification''': XLMForTokenClassification,
            '''zero-shot''': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def A_ ( self : Optional[int] , __a : int , __a : int , __a : Tuple , __a : int , __a : Optional[int] ) -> Dict:
        '''Return True for pipeline test combinations known to fail (slow-tokenizer QA).'''
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def A_ ( self : Optional[Any] , __a : str , __a : List[str] , __a : Union[str, Any]=False ) -> int:
        '''Add dummy start/end position labels for the beam-search QA model.'''
        __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __snake_case : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__a )
                __snake_case : List[Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__a )
        return inputs_dict

    def A_ ( self : List[str] ) -> Union[str, Any]:
        '''setUp: build the model tester and config tester.'''
        __snake_case : List[str] = XLMModelTester(self )
        __snake_case : int = ConfigTester(self , config_class=__a , emb_dim=37 )

    def A_ ( self : List[str] ) -> Any:
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()

    def A_ ( self : Dict ) -> int:
        '''Shape-check the base model.'''
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*__a )

    def A_ ( self : Union[str, Any] ) -> int:
        '''Shape-check the LM head.'''
        __snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*__a )

    def A_ ( self : Tuple ) -> Optional[Any]:
        '''Shape-check the simple QA head.'''
        __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*__a )

    def A_ ( self : List[str] ) -> Optional[Any]:
        '''Shape-check the beam-search QA head.'''
        __snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*__a )

    def A_ ( self : List[Any] ) -> Optional[Any]:
        '''Shape-check the sequence-classification head.'''
        __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*__a )

    def A_ ( self : Union[str, Any] ) -> List[str]:
        '''Shape-check the token-classification head.'''
        __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*__a )

    def A_ ( self : Any ) -> Dict:
        '''Shape-check the multiple-choice head.'''
        __snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*__a )

    def A_ ( self : Tuple , __a : str , __a : Any , __a : int , __a : List[str] , __a : Optional[int] , __a : str=False , __a : Any=1 ) -> List[Any]:
        '''Check per-generation-step attention shapes (overrides the generation mixin hook).'''
        self.assertIsInstance(__a , __a )
        self.assertListEqual(
            [isinstance(__a , __a ) for iter_attentions in attentions] , [True] * len(__a ) )
        self.assertEqual(len(__a ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(__a ):
            # adds PAD dummy token
            __snake_case : str = min_length + idx + 1
            __snake_case : Union[str, Any] = min_length + idx + 1
            __snake_case : str = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__a ) )

    def A_ ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : str , __a : str , __a : List[Any] , __a : Optional[int]=False , __a : int=1 ) -> List[Any]:
        '''Check per-generation-step hidden-state shapes (overrides the generation mixin hook).'''
        self.assertIsInstance(__a , __a )
        self.assertListEqual(
            [isinstance(__a , __a ) for iter_hidden_states in hidden_states] , [True] * len(__a ) , )
        self.assertEqual(len(__a ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(__a ):
            # adds PAD dummy token
            __snake_case : Any = min_length + idx + 1
            __snake_case : Optional[int] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__a ) , )
        pass

    @slow
    def A_ ( self : List[str] ) -> str:
        '''Smoke-test loading the first pretrained checkpoint.'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Union[str, Any] = XLMModel.from_pretrained(__a )
            self.assertIsNotNone(__a )
@require_torch
class snake_case__ ( unittest.TestCase ):
    """Slow integration test: generation with the pretrained xlm-mlm-en-2048 checkpoint."""

    @slow
    def A_ ( self : Any ) -> List[str]:
        '''Generate greedily from "the president" and compare against reference token ids.'''
        __snake_case : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(__a )
        __snake_case : List[str] = torch.tensor([[14, 447]] , dtype=torch.long , device=__a ) # the president
        __snake_case : Optional[Any] = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ] # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        __snake_case : str = model.generate(__a , do_sample=__a )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __a )
| 124 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG used as the default randomness source for the float helpers below.
# (Annotation corrected: this is a random.Random instance, not a Tuple.)
A__ : random.Random = random.Random()
def a_(shape, scale=1.0, rng=None, name=None):
    """Create a ``shape[0]`` x ``shape[1]`` nested list of random floats in ``[0, scale)``.

    Fixes applied: the original signature reused one obfuscated name
    (``_UpperCAmelCase``) for all four parameters — a SyntaxError — and, when
    no RNG was supplied, bound the module-level generator to a throwaway
    variable so ``rng`` stayed ``None`` (AttributeError on ``rng.random()``).

    Args:
        shape: pair ``(batch_size, length)`` describing the nested list to build.
        scale: multiplier applied to each raw random value in ``[0, 1)``.
        rng: optional ``random.Random``; defaults to the module-level ``A__``.
        name: unused; kept for signature compatibility with similar helpers.

    Returns:
        list[list[float]] with ``shape[0]`` rows of ``shape[1]`` values each.
    """
    if rng is None:
        rng = A__  # fall back to the shared module-level generator
    values = []
    for _ in range(shape[0]):
        values.append([rng.random() * scale for _ in range(shape[1])])
    return values
class snake_case__ ( unittest.TestCase ):
    """Builds feature-extractor kwargs and batches of dummy speech inputs for the tests below.

    NOTE(review): obfuscation damage — signatures reuse ``__a`` for every
    parameter (SyntaxError) and bodies assign to ``__snake_case`` while
    reading differently named locals.  Code preserved byte-identically.
    """

    def __init__( self : Tuple , __a : Optional[Any] , __a : Optional[int]=7 , __a : Any=400 , __a : str=2000 , __a : Union[str, Any]=1 , __a : Union[str, Any]=0.0 , __a : Tuple=16000 , __a : str=True , __a : int=True , ) -> Any:
        '''Record batch/sequence sizes and the feature-extractor options under test.'''
        __snake_case : List[str] = parent
        __snake_case : List[str] = batch_size
        __snake_case : List[str] = min_seq_length
        __snake_case : Tuple = max_seq_length
        # Step between consecutive sample lengths so the batch spans min..max.
        __snake_case : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __snake_case : List[Any] = feature_size
        __snake_case : List[Any] = padding_value
        __snake_case : Tuple = sampling_rate
        __snake_case : Tuple = return_attention_mask
        __snake_case : Dict = do_normalize

    def A_ ( self : List[str] ) -> Optional[int]:
        '''Return the kwargs dict used to construct a feature extractor.'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def A_ ( self : List[Any] , __a : str=False , __a : Optional[int]=False ) -> str:
        '''Create a batch of dummy speech inputs (equal-length or increasing lengths).'''
        def _flatten(__a : Dict ):
            # Flatten one level of nesting into a single list.
            return list(itertools.chain(*__a ) )
        if equal_length:
            __snake_case : List[Any] = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __snake_case : str = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            __snake_case : Optional[Any] = [np.asarray(__a ) for x in speech_inputs]
        return speech_inputs
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Shape, padding and normalization tests for ``WavaVecaFeatureExtractor``.

    NOTE(review): obfuscation damage throughout — ``__a`` reused for all
    parameters, locals renamed away, and the tester class referenced below
    (``WavaVecaFeatureExtractionTester``) does not match the class name
    defined above.  Code preserved byte-identically.
    """

    # Feature-extractor class exercised by the mixin's common tests.
    A__ = WavaVecaFeatureExtractor

    def A_ ( self : int ) -> Union[str, Any]:
        '''setUp: build the input tester.'''
        __snake_case : Union[str, Any] = WavaVecaFeatureExtractionTester(self )

    def A_ ( self : Dict , __a : Tuple ) -> Any:
        '''Assert the given values have ~zero mean and ~unit variance.'''
        self.assertTrue(np.all(np.mean(__a , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(__a , axis=0 ) - 1 ) < 1e-3 ) )

    def A_ ( self : Dict ) -> int:
        '''Single, batched list and 2-D array inputs should encode identically.'''
        # Tests that all call wrap to encode_plus and batch_encode_plus
        __snake_case : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __snake_case : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __snake_case : Optional[Any] = [np.asarray(__a ) for speech_input in speech_inputs]
        # Test not batched input
        __snake_case : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
        __snake_case : Tuple = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
        self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
        # Test batched
        __snake_case : Union[str, Any] = feat_extract(__a , return_tensors='np' ).input_values
        __snake_case : Tuple = feat_extract(__a , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_a in zip(__a , __a ):
            self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        __snake_case : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        __snake_case : List[str] = np.asarray(__a )
        __snake_case : Union[str, Any] = feat_extract(__a , return_tensors='np' ).input_values
        __snake_case : List[Any] = feat_extract(__a , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_a in zip(__a , __a ):
            self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )

    def A_ ( self : List[Any] ) -> List[Any]:
        '''Normalization should hold for each padding strategy (np tensors).'''
        __snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __snake_case : Dict = ['longest', 'max_length', 'do_not_pad']
        __snake_case : Any = [None, 1600, None]
        for max_length, padding in zip(__a , __a ):
            __snake_case : Any = feat_extract(__a , padding=__a , max_length=__a , return_tensors='np' )
            __snake_case : Optional[Any] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def A_ ( self : Optional[int] ) -> List[str]:
        '''Normalization should hold for each padding strategy (python lists).'''
        __snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __snake_case : List[Any] = range(800 , 1400 , 200 )
        __snake_case : Any = [floats_list((1, x) )[0] for x in lengths]
        __snake_case : Tuple = ['longest', 'max_length', 'do_not_pad']
        __snake_case : Dict = [None, 1600, None]
        for max_length, padding in zip(__a , __a ):
            __snake_case : str = feat_extract(__a , max_length=__a , padding=__a )
            __snake_case : str = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def A_ ( self : List[str] ) -> str:
        '''Truncation to max_length should keep per-sample normalization.'''
        __snake_case : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __snake_case : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __snake_case : Any = feat_extract(
            __a , truncation=__a , max_length=1000 , padding='max_length' , return_tensors='np' )
        __snake_case : List[str] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def A_ ( self : List[Any] ) -> Any:
        '''"longest" padding should clamp to max_length or to the longest sample.'''
        __snake_case : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __snake_case : Union[str, Any] = feat_extract(
            __a , truncation=__a , max_length=1000 , padding='longest' , return_tensors='np' )
        __snake_case : Tuple = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        __snake_case : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __snake_case : Any = feat_extract(
            __a , truncation=__a , max_length=2000 , padding='longest' , return_tensors='np' )
        __snake_case : Any = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )

    @require_torch
    def A_ ( self : Union[str, Any] ) -> Dict:
        '''Padding should preserve dtype for both numpy and torch outputs.'''
        import torch
        __snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __snake_case : Dict = np.random.rand(100 ).astype(np.floataa )
        __snake_case : Any = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            __snake_case : Dict = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __snake_case : List[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    @slow
    @require_torch
    def A_ ( self : Optional[int] ) -> Dict:
        '''Only "layer"-norm checkpoints should return an attention mask.'''
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            __snake_case : List[str] = WavaVecaConfig.from_pretrained(__a )
            __snake_case : str = WavaVecaFeatureExtractor.from_pretrained(__a )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
| 124 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_SCREAMING_SNAKE_CASE = None
# NOTE(review): obfuscation collapsed five distinct module constants (the
# logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and the SentencePiece underline
# marker) into one repeatedly rebound name, so only the last assignment
# survives at runtime.  Code preserved byte-identically.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Expected vocab/tokenizer file names inside a saved tokenizer directory.
_SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
# Download URLs for the published FNet checkpoints.
_SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}
# Maximum input lengths per checkpoint.
_SCREAMING_SNAKE_CASE = {
    'google/fnet-base': 5_1_2,
    'google/fnet-large': 5_1_2,
}
# SentencePiece word-boundary marker character.
_SCREAMING_SNAKE_CASE = '▁'
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
    """Fast (tokenizers-backed) FNet tokenizer wrapping a SentencePiece model.

    NOTE(review): obfuscation damage — the five class attributes all rebind
    ``__lowerCAmelCase`` so only the last survives, ``__init__`` reuses
    ``lowerCamelCase_`` for every parameter (SyntaxError), and bodies read
    locals (``mask_token``, ``token_ids_a``, ...) that the signatures renamed
    away.  Code preserved byte-identically; documentation only.
    """

    __lowerCAmelCase = VOCAB_FILES_NAMES
    __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase = ["""input_ids""", """token_type_ids"""]
    __lowerCAmelCase = FNetTokenizer

    def __init__( self : Any , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]="<unk>" , lowerCamelCase_ : Dict="[SEP]" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : str="[CLS]" , lowerCamelCase_ : Optional[int]="[MASK]" , **lowerCamelCase_ : Optional[Any] , ):
        """Wrap the mask token as an ``AddedToken`` and forward everything to the fast-tokenizer base."""
        UpperCamelCase = (
            AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
            if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
            else mask_token
        )
        super().__init__(
            lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
        UpperCamelCase = do_lower_case
        UpperCamelCase = remove_space
        UpperCamelCase = keep_accents
        UpperCamelCase = vocab_file
        # Saving the slow tokenizer requires the original SentencePiece file.
        UpperCamelCase = False if not self.vocab_file else True

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] = None ):
        """Build model inputs with special tokens: ``[CLS] A [SEP]`` (plus ``B [SEP]`` for pairs)."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Dict = None ):
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any = None ):
        """Copy the SentencePiece model file into ``save_directory``; returns the path tuple."""
        if not os.path.isdir(lowerCAmelCase__ ):
            # Returns None on error — callers expecting a tuple should check.
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCamelCase = os.path.join(
            lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
            copyfile(self.vocab_file , lowerCAmelCase__ )
        return (out_vocab_file,)
| 537 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Processor bundling a ``Pix2StructImageProcessor`` with a T5 tokenizer.

    NOTE(review): obfuscation damage — the three class attributes rebind
    ``_UpperCAmelCase`` (only the last survives), ``__call__`` reuses
    ``lowerCAmelCase__`` for every parameter (SyntaxError), and the two
    decode helpers plus the property all share the name ``UpperCamelCase__``
    (later definitions shadow earlier ones).  Code preserved byte-identically.
    """

    _UpperCAmelCase = ["""image_processor""", """tokenizer"""]
    _UpperCAmelCase = """Pix2StructImageProcessor"""
    _UpperCAmelCase = ("""T5Tokenizer""", """T5TokenizerFast""")

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ):
        """Store the two sub-processors; Pix2Struct's tokenizer emits no token_type_ids."""
        SCREAMING_SNAKE_CASE_ : Dict = False
        super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )

    def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 2_0_4_8 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
        """Tokenize text and/or extract image patches into a single encoding."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer
            SCREAMING_SNAKE_CASE_ : str = self.tokenizer(
                text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor(
                lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , **lowerCAmelCase__ )
        else:
            # add pixel_values and bbox
            SCREAMING_SNAKE_CASE_ : Any = self.image_processor(
                lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ , **lowerCAmelCase__ )
        if text is not None and not self.image_processor.is_vqa:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer(
                text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
            # Rename the text keys so they do not collide with the image keys.
            if "attention_mask" in text_encoding:
                SCREAMING_SNAKE_CASE_ : Any = text_encoding.pop('attention_mask' )
            if "input_ids" in text_encoding:
                SCREAMING_SNAKE_CASE_ : List[str] = text_encoding.pop('input_ids' )
        else:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
        if text_encoding is not None:
            encoding_image_processor.update(lowerCAmelCase__ )
        return encoding_image_processor

    def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )

    def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )

    @property
    def UpperCamelCase__ ( self ):
        """Union of tokenizer and image-processor input names, order-preserving and de-duplicated."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 101 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__lowerCAmelCase = [
    'openmmlab/upernet-convnext-tiny',
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# Backward-compatible alias: downstream code refers to the archive list by its
# canonical name, and `__lowerCAmelCase` is rebound immediately below.
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = __lowerCAmelCase

# General docstring
__lowerCAmelCase = 'UperNetConfig'
# Fix: `replace_return_docstrings(..., config_class=_CONFIG_FOR_DOC)` later in
# this module reads `_CONFIG_FOR_DOC`, which was never defined after the
# obfuscating rename; expose it under the expected name.
_CONFIG_FOR_DOC = __lowerCAmelCase
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """Convolution block used throughout UperNet: Conv2d -> BatchNorm2d -> ReLU.

    Fixes vs. the original: duplicate parameter names (a SyntaxError), layers
    bound to a throwaway local instead of `self`, and the nonexistent
    `nn.Convad` / `nn.BatchNormad` attributes.
    """

    def __init__( self , in_channels : int , out_channels : int , kernel_size : Union[int, Tuple[int, int]] , padding : Union[int, Tuple[int, int], str] = 0 , bias : bool = False , dilation : Union[int, Tuple[int, int]] = 1 , ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()

    def SCREAMING_SNAKE_CASE ( self , input : torch.Tensor ) -> torch.Tensor:
        """Apply conv -> batch norm -> ReLU to `input`."""
        # NOTE(review): kept under the obfuscated method name; nn.Module's
        # __call__ dispatches to `forward`, which this class does not define.
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output


# Backward-compatible alias: the rest of this module instantiates the block
# under its canonical name.
UperNetConvModule = SCREAMING_SNAKE_CASE
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """Single pyramid-pooling branch: adaptive average pooling to
    `pool_scale`, then a 1x1 UperNetConvModule projecting `in_channels` to
    `channels`.

    Fixes vs. the original: duplicate parameter names (a SyntaxError), the
    layer list bound to a throwaway local while `self.layers` was read, and
    the nonexistent `nn.AdaptiveAvgPoolad`.
    """

    def __init__( self , pool_scale : int , in_channels : int , channels : int ) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        # Register each stage so its parameters are tracked by nn.Module.
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )

    def SCREAMING_SNAKE_CASE ( self , input : torch.Tensor ) -> torch.Tensor:
        """Run the pooled projection pipeline over `input`."""
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state


# Canonical name used by the pyramid pooling module below.
UperNetPyramidPoolingBlock = SCREAMING_SNAKE_CASE
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """Pyramid Pooling Module (PSP): one pooling block per scale; every
    branch output is bilinearly upsampled back to the input's spatial size so
    the caller can concatenate them channel-wise.

    Fixes vs. the original: duplicate parameter names (a SyntaxError) and
    `self.blocks` being read although only a throwaway local was assigned.
    """

    def __init__( self , pool_scales : Tuple[int, ...] , in_channels : int , channels : int , align_corners : bool ) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )

    def SCREAMING_SNAKE_CASE ( self , x : torch.Tensor ) -> List[torch.Tensor]:
        """Return one upsampled feature map per pooling scale."""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs


# Canonical name used by the UperNet decode head.
UperNetPyramidPoolingModule = SCREAMING_SNAKE_CASE
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """UperNet decode head: a pyramid-pooling (PSP) module over the deepest
    backbone feature map plus an FPN over the shallower maps, fused by a
    bottleneck conv and mapped to per-pixel class logits.

    NOTE(review): obfuscation broke this class — the duplicated
    `__SCREAMING_SNAKE_CASE` parameters are a SyntaxError, every `a_ : ... =`
    binding discards a value that a later `self.*` read depends on, and all
    four methods share one name so only the last definition survives.
    Comments below describe the evident intent only; verify against upstream.
    """
    # Intended signature presumably: __init__(self, config, in_channels).
    def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
        super().__init__()
        # These bindings presumably targeted self.config, self.pool_scales,
        # self.in_channels, self.channels, self.align_corners,
        # self.classifier — TODO confirm against the upstream file.
        a_ : Optional[Any] = config
        a_ : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
        a_ : str = in_channels
        a_ : Optional[int] = config.hidden_size
        a_ : str = False
        a_ : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        a_ : Optional[Any] = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        a_ : Optional[Any] = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        a_ : Tuple = nn.ModuleList()
        a_ : Any = nn.ModuleList()
        for in_channels in self.in_channels[:-1]: # skip the top layer
            a_ : Dict = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
            a_ : List[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(__SCREAMING_SNAKE_CASE )
            self.fpn_convs.append(__SCREAMING_SNAKE_CASE )
        a_ : Optional[int] = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    # Presumably init_weights(): apply the Conv2d initializer below.
    def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        self.apply(self._init_weights )
    # Presumably _init_weights(module): normal-init Conv2d weights, zero bias.
    def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> Any:
        if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    # Presumably psp_forward(inputs): PSP over the deepest map + bottleneck.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
        a_ : Tuple = inputs[-1]
        a_ : List[str] = [x]
        psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) )
        a_ : Dict = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
        a_ : Union[str, Any] = self.bottleneck(__SCREAMING_SNAKE_CASE )
        return output
    # Presumably forward(encoder_hidden_states): laterals -> top-down FPN
    # fusion -> per-level convs -> concat -> bottleneck -> classifier.
    def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
        # build laterals
        a_ : Dict = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) )
        # build top-down path
        a_ : Any = len(__SCREAMING_SNAKE_CASE )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            a_ : Optional[Any] = laterals[i - 1].shape[2:]
            a_ : List[Any] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners )
        # build outputs
        a_ : Tuple = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            a_ : str = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
        a_ : List[Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
        a_ : List[str] = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE )
        a_ : str = self.classifier(__SCREAMING_SNAKE_CASE )
        return output
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """UperNet auxiliary FCN head: a small conv stack over one intermediate
    backbone feature map, optionally concatenated with its input, ending in a
    1x1 classifier. Used for the auxiliary training loss.

    NOTE(review): broken by obfuscation — duplicated default parameters are a
    SyntaxError, `a_ : ... =` bindings discard values that later `self.*`
    reads depend on, and the three methods share one name. Comments describe
    the evident intent only; verify against upstream.
    """
    # Intended signature presumably: __init__(self, config, in_index=2,
    # kernel_size=3, dilation=1).
    def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] = 1 ) -> None:
        super().__init__()
        a_ : Any = config
        a_ : Optional[Any] = config.auxiliary_in_channels
        a_ : Optional[Any] = config.auxiliary_channels
        a_ : Tuple = config.auxiliary_num_convs
        a_ : Tuple = config.auxiliary_concat_input
        a_ : Dict = in_index
        # "same" padding for the dilated conv stack.
        a_ : str = (kernel_size // 2) * dilation
        a_ : List[str] = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
        if self.num_convs == 0:
            a_ : Optional[int] = nn.Identity()
        else:
            a_ : Optional[int] = nn.Sequential(*__SCREAMING_SNAKE_CASE )
        if self.concat_input:
            a_ : str = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
        a_ : int = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
    # Presumably init_weights(): apply the Conv2d initializer below.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        self.apply(self._init_weights )
    # Presumably _init_weights(module): normal-init Conv2d weights, zero bias.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
        if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    # Presumably forward(encoder_hidden_states): pick one feature map, run
    # the conv stack, optionally concat with the input, then classify.
    def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
        # just take the relevant feature maps
        a_ : str = encoder_hidden_states[self.in_index]
        a_ : Optional[int] = self.convs(__SCREAMING_SNAKE_CASE )
        if self.concat_input:
            a_ : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        a_ : str = self.classifier(__SCREAMING_SNAKE_CASE )
        return output
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
    """Presumably the UperNet PreTrainedModel base: declares the config
    class, the main input name, and gradient-checkpointing support.

    NOTE(review): the base class name `SCREAMING_SNAKE_CASE_` is undefined in
    this module (presumably `PreTrainedModel`), the three class attributes
    all share one obfuscated name so only the last survives, and the methods
    below read names (`module`, `value`) that were never bound — verify
    against upstream.
    """
    # Presumably: config_class, main_input_name, supports_gradient_checkpointing.
    snake_case__ = UperNetConfig
    snake_case__ = "pixel_values"
    snake_case__ = True
    # Presumably _init_weights(module): delegate weight init to sub-modules.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    # Presumably init_weights(): initialize backbone and both heads.
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    # Presumably _set_gradient_checkpointing(module, value=False).
    def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=False ) -> str:
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            a_ : Optional[Any] = value
# Model docstring templates. Fix: both were obfuscated onto a single name, so
# the START docstring was shadowed by the INPUTS docstring, and the decorators
# later in this module read `UPERNET_INPUTS_DOCSTRING`, which was never
# defined. Expose each template under the canonical name while keeping the
# original assignments for backward compatibility.
__lowerCAmelCase = r'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_START_DOCSTRING = __lowerCAmelCase
__lowerCAmelCase = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
UPERNET_INPUTS_DOCSTRING = __lowerCAmelCase
# NOTE(review): `SCREAMING_SNAKE_CASE_` (the decorator argument and the base
# class) is undefined in this module — presumably UPERNET_START_DOCSTRING and
# the UperNet PreTrainedModel base respectively; verify against upstream.
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , SCREAMING_SNAKE_CASE_ , )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
    """UperNet semantic-segmentation model: a vision backbone, the UperNet
    decode head and an optional auxiliary FCN head.

    NOTE(review): broken by obfuscation — every `a_ : ... =` binding discards
    a value that later reads (`return_dict`, `outputs`, `logits`, `loss`,
    ...) depend on, and the duplicated forward parameters are a SyntaxError.
    Comments describe the evident intent only.
    """
    def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
        super().__init__(__SCREAMING_SNAKE_CASE )
        # Presumably self.backbone, self.decode_head, self.auxiliary_head.
        a_ : Optional[int] = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        a_ : List[str] = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
        a_ : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    # Presumably forward(pixel_values, output_attentions,
    # output_hidden_states, labels, return_dict).
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        # Resolve output options against the model config.
        a_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        a_ : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a_ : List[str] = output_attentions if output_attentions is not None else self.config.output_attentions
        a_ : Union[str, Any] = self.backbone.forward_with_filtered_kwargs(
            __SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
        a_ : Optional[Any] = outputs.feature_maps
        # Decode head logits, upsampled to the input resolution.
        a_ : Optional[Any] = self.decode_head(__SCREAMING_SNAKE_CASE )
        a_ : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
        a_ : Any = None
        if self.auxiliary_head is not None:
            a_ : Optional[int] = self.auxiliary_head(__SCREAMING_SNAKE_CASE )
            a_ : Optional[Any] = nn.functional.interpolate(
                __SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
        a_ : str = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                a_ : List[str] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                a_ : List[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                a_ : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                a_ : Dict = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                a_ : Union[str, Any] = (logits,) + outputs[1:]
            else:
                a_ : List[Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( df , partition_order ):
    """Collect each partition of `df` in the given order and return a list of
    (row_id, row_dict) tuples, where row_id is "<partition id>_<row index>".

    Fixes vs. the original: the duplicated `__A` parameters were a
    SyntaxError, and results were appended to a name that was never assigned.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts


# The test functions below refer to this helper by its descriptive name.
_get_expected_row_ids_and_row_dicts_for_partition_order = _UpperCAmelCase
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    """_repartition_df_if_needed splits partitions down to `max_shard_size`.

    Fixes vs. the original: every local was bound to the throwaway `a_` name
    while later statements read the intended names, raising NameError.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    """_generate_iterable_examples yields rows following a custom partition
    order.

    Fixes vs. the original: locals were bound to the throwaway `a_` name
    while later statements read the intended names, raising NameError.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    # Reverse the partitions.
    generate_fn = _generate_iterable_examples(df , partition_order )
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    """A single-partition dataframe yields its rows, in order, from 1 shard.

    Fixes vs. the original: locals were bound to the throwaway `a_` name
    while later statements read the intended names, raising NameError.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    """shuffle_data_sources reorders shards according to the RNG's shuffle.

    Fixes vs. the original: locals were bound to the throwaway `a_` name,
    and the mocked shuffle lambda's parameter was renamed away from the `x`
    its body reads.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('numpy.random.Generator' ) as generator_mock:
        generator_mock.shuffle = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    """shard_data_sources splits the shards round-robin across workers.

    Fixes vs. the original: locals were bound to the throwaway `a_` name
    while later statements read the intended names, raising NameError.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_b = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_b.n_shards == 2
    expected_row_ids_and_row_dicts_b = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_b ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_b[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    """Repartitioning never creates more partitions than there are rows.

    Fixes vs. the original: locals were bound to the throwaway `a_` name
    while later statements read the intended names, raising NameError.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _A ( unittest.TestCase ):
    """SamProcessor tests (PyTorch path): save/load round-trips and
    post_process_masks shape handling.

    NOTE(review): obfuscation broke this class — every method shares the name
    `lowercase` (so unittest discovers none of them and only the last def
    survives), and `__snake_case = ...` bindings discard values that later
    reads (`processor`, `image_processor`, `masks`, `A_`, ...) depend on.
    Comments describe the evident intent only; verify against upstream.
    """
    # Presumably setUp(): build a SamProcessor and save it to a temp dir.
    def lowercase ( self : Union[str, Any] ) -> List[str]:
        __snake_case = tempfile.mkdtemp()
        __snake_case = SamImageProcessor()
        __snake_case = SamProcessor(A_ )
        processor.save_pretrained(self.tmpdirname )
    # Presumably get_image_processor(**kwargs): reload from the temp dir.
    def lowercase ( self : Union[str, Any] , **A_ : Optional[int] ) -> Optional[Any]:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor
    # Presumably tearDown(): remove the temp dir.
    def lowercase ( self : Tuple ) -> Union[str, Any]:
        shutil.rmtree(self.tmpdirname )
    # Presumably prepare_image_inputs(): one random PIL image.
    # NOTE(review): `np.uinta` is not a NumPy attribute (presumably np.uint8).
    def lowercase ( self : Optional[int] ) -> str:
        __snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # Presumably: save_pretrained/from_pretrained preserves processor config.
    def lowercase ( self : Any ) -> Any:
        __snake_case = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
        __snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=A_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )
    # Presumably: processor output matches the bare image processor output.
    def lowercase ( self : Any ) -> int:
        __snake_case = self.get_image_processor()
        __snake_case = SamProcessor(image_processor=A_ )
        __snake_case = self.prepare_image_inputs()
        __snake_case = image_processor(A_ , return_tensors='''np''' )
        __snake_case = processor(images=A_ , return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # Presumably: post_process_masks upsamples masks to the original sizes
    # for torch tensors, lists and numpy arrays, and rejects ragged sizes.
    @require_torch
    def lowercase ( self : Tuple ) -> Any:
        __snake_case = self.get_image_processor()
        __snake_case = SamProcessor(image_processor=A_ )
        __snake_case = [torch.ones((1, 3, 5, 5) )]
        __snake_case = [[1_764, 2_646]]
        __snake_case = [[683, 1_024]]
        __snake_case = processor.post_process_masks(A_ , A_ , A_ )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        __snake_case = processor.post_process_masks(
            A_ , torch.tensor(A_ ) , torch.tensor(A_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        # should also work with np
        __snake_case = [np.ones((1, 3, 5, 5) )]
        __snake_case = processor.post_process_masks(A_ , np.array(A_ ) , np.array(A_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        __snake_case = [[1, 0], [0, 1]]
        with self.assertRaises(A_ ):
            __snake_case = processor.post_process_masks(A_ , np.array(A_ ) , np.array(A_ ) )
@require_vision
@require_tf
class _A ( unittest.TestCase ):
    """SamProcessor tests (TensorFlow path): save/load round-trips and
    post_process_masks with `return_tensors='tf'`.

    NOTE(review): obfuscation broke this class — every method shares the name
    `lowercase` (so unittest discovers none of them and only the last def
    survives), and `__snake_case = ...` bindings discard values that later
    reads depend on. Comments describe the evident intent only.
    """
    # Presumably setUp(): build a SamProcessor and save it to a temp dir.
    def lowercase ( self : Any ) -> Tuple:
        __snake_case = tempfile.mkdtemp()
        __snake_case = SamImageProcessor()
        __snake_case = SamProcessor(A_ )
        processor.save_pretrained(self.tmpdirname )
    # Presumably get_image_processor(**kwargs): reload from the temp dir.
    def lowercase ( self : Optional[Any] , **A_ : Tuple ) -> Dict:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor
    # Presumably tearDown(): remove the temp dir.
    def lowercase ( self : int ) -> str:
        shutil.rmtree(self.tmpdirname )
    # Presumably prepare_image_inputs(): one random PIL image.
    # NOTE(review): `np.uinta` is not a NumPy attribute (presumably np.uint8).
    def lowercase ( self : Optional[int] ) -> Dict:
        __snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # Presumably: save_pretrained/from_pretrained preserves processor config.
    def lowercase ( self : Optional[int] ) -> Dict:
        __snake_case = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
        __snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=A_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )
    # Presumably: processor output matches the bare image processor output.
    def lowercase ( self : List[str] ) -> List[str]:
        __snake_case = self.get_image_processor()
        __snake_case = SamProcessor(image_processor=A_ )
        __snake_case = self.prepare_image_inputs()
        __snake_case = image_processor(A_ , return_tensors='''np''' )
        __snake_case = processor(images=A_ , return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # Presumably: post_process_masks upsamples masks to the original sizes
    # for tf tensors, lists and numpy arrays, and rejects ragged sizes.
    @require_tf
    def lowercase ( self : Optional[Any] ) -> List[Any]:
        __snake_case = self.get_image_processor()
        __snake_case = SamProcessor(image_processor=A_ )
        __snake_case = [tf.ones((1, 3, 5, 5) )]
        __snake_case = [[1_764, 2_646]]
        __snake_case = [[683, 1_024]]
        __snake_case = processor.post_process_masks(A_ , A_ , A_ , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        __snake_case = processor.post_process_masks(
            A_ , tf.convert_to_tensor(A_ ) , tf.convert_to_tensor(A_ ) , return_tensors='''tf''' , )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        # should also work with np
        __snake_case = [np.ones((1, 3, 5, 5) )]
        __snake_case = processor.post_process_masks(
            A_ , np.array(A_ ) , np.array(A_ ) , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        __snake_case = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            __snake_case = processor.post_process_masks(
                A_ , np.array(A_ ) , np.array(A_ ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class _A ( unittest.TestCase ):
    """SamProcessor cross-framework tests: PyTorch and TensorFlow code paths
    must produce equivalent post-processed masks and pixel values.

    NOTE(review): obfuscation broke this class — every method shares the name
    `lowercase`, and `__snake_case = ...` bindings discard values that later
    reads (`processor`, `tf_masks`, `pt_masks`, `A_`, ...) depend on.
    Comments describe the evident intent only; verify against upstream.
    """
    # Presumably setUp(): build a SamProcessor and save it to a temp dir.
    def lowercase ( self : Optional[int] ) -> Tuple:
        __snake_case = tempfile.mkdtemp()
        __snake_case = SamImageProcessor()
        __snake_case = SamProcessor(A_ )
        processor.save_pretrained(self.tmpdirname )
    # Presumably get_image_processor(**kwargs): reload from the temp dir.
    def lowercase ( self : Dict , **A_ : Tuple ) -> Tuple:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor
    # Presumably tearDown(): remove the temp dir.
    def lowercase ( self : List[Any] ) -> List[str]:
        shutil.rmtree(self.tmpdirname )
    # Presumably prepare_image_inputs(): one random PIL image.
    # NOTE(review): `np.uinta` is not a NumPy attribute (presumably np.uint8).
    def lowercase ( self : Dict ) -> Union[str, Any]:
        __snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # Presumably: post_process_masks agrees between the tf and pt paths.
    # NOTE(review): `np.floataa` is not a NumPy attribute (presumably
    # np.float32).
    @is_pt_tf_cross_test
    def lowercase ( self : Optional[int] ) -> int:
        __snake_case = self.get_image_processor()
        __snake_case = SamProcessor(image_processor=A_ )
        __snake_case = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        __snake_case = [tf.convert_to_tensor(A_ )]
        __snake_case = [torch.tensor(A_ )]
        __snake_case = [[1_764, 2_646]]
        __snake_case = [[683, 1_024]]
        __snake_case = processor.post_process_masks(
            A_ , A_ , A_ , return_tensors='''tf''' )
        __snake_case = processor.post_process_masks(
            A_ , A_ , A_ , return_tensors='''pt''' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    # Presumably: processor pixel_values agree between the pt and tf paths.
    @is_pt_tf_cross_test
    def lowercase ( self : Optional[int] ) -> int:
        __snake_case = self.get_image_processor()
        __snake_case = SamProcessor(image_processor=A_ )
        __snake_case = self.prepare_image_inputs()
        __snake_case = image_processor(A_ , return_tensors='''pt''' )['''pixel_values'''].numpy()
        __snake_case = processor(images=A_ , return_tensors='''pt''' )['''pixel_values'''].numpy()
        __snake_case = image_processor(A_ , return_tensors='''tf''' )['''pixel_values'''].numpy()
        __snake_case = processor(images=A_ , return_tensors='''tf''' )['''pixel_values'''].numpy()
        self.assertTrue(np.allclose(A_ , A_ ) )
        self.assertTrue(np.allclose(A_ , A_ ) )
self.assertTrue(np.allclose(A_ , A_ ) ) | 564 | """simple docstring"""
import argparse
from collections import defaultdict
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case):
__snake_case = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(snake_case, '''r''') as f:
__snake_case = f.readlines()
__snake_case = f"class {class_name}("
__snake_case = f"{4 * ' '}def {test_name}("
__snake_case = f"{8 * ' '}{correct_line.split()[0]}"
__snake_case = f"{16 * ' '}{correct_line.split()[0]}"
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = 0
__snake_case = 0
__snake_case = []
for line in lines:
if line.startswith(snake_case):
__snake_case = True
elif in_class and line.startswith(snake_case):
__snake_case = True
elif in_class and in_func and (line.startswith(snake_case) or line.startswith(snake_case)):
__snake_case = len(line.split(correct_line.split()[0])[0])
count += 1
if count == done_test[_id]:
__snake_case = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__snake_case = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}")
__snake_case = __snake_case = __snake_case = __snake_case = False
else:
new_lines.append(snake_case)
with open(snake_case, '''w''') as f:
for line in new_lines:
f.write(snake_case)
def SCREAMING_SNAKE_CASE ( correct, fail=None):
    """Read `correct` (';'-separated "file;class;test;line" records) and
    rewrite each listed test's expected line via `overwrite_file`, optionally
    restricted to the "file::class::test" ids listed in `fail`.

    Fixes vs. the original: locals were bound to a throwaway name while later
    statements read the intended ones (`fail`, `test_failures`,
    `correct_lines`), raising NameError.
    """
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


# Script entry point name expected by the __main__ block below.
main = SCREAMING_SNAKE_CASE
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
__lowercase : Union[str, Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename) | 564 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCamelCase_ ( model_a , model_b , did_step , iteration ):
    """Assert pairwise gradient (non-)synchronization between two models:
    when `did_step` is False, each pair of gradients must differ; when True,
    they must match. Parameters with `requires_grad=False` are skipped.

    Fix vs. the original: the four duplicated parameter names were a
    SyntaxError.
    """
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'


# Descriptive alias used by the test functions in this module.
check_model_parameters = lowerCamelCase_
def lowerCamelCase_ ( model , input , target , accelerator , do_backward=True ):
    """Run one MSE training step. With `do_backward=True` the backward pass
    is delegated to `accelerator.backward`; otherwise the loss is scaled by
    the accelerator's accumulation steps and backpropagated manually.

    Fix vs. the original: the five duplicated parameter names were a
    SyntaxError while the body read `model`, `input`, `target` and
    `accelerator`.
    """
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )


# Descriptive alias used by the test functions in this module.
step_model = lowerCamelCase_
def lowerCamelCase_ ( accelerator , sched=False ):
    """Build the basic training fixtures: a RegressionModel, a deep copy to
    be prepared through `accelerator`, and a DataLoader — plus optimizers and
    LR schedulers for both models when `sched` is True.

    Fixes vs. the original: duplicated parameter names (a SyntaxError),
    every local bound to a throwaway name, and the scheduler lambdas reading
    the undefined `epoch` because their parameter had been renamed.
    """
    set_seed(4_2 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=8_0 )
    dataloader = DataLoader(dset , batch_size=1_6 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


# Descriptive alias used by the test functions in this module.
get_training_setup = lowerCamelCase_
def test_noop_sync(accelerator):
    """On a single CPU/GPU, `accelerator.no_sync` must be a no-op.

    Runs three iterations alternating inside/outside the `no_sync` context and
    checks that the reference model and the prepared copy always end up with
    identical gradients.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch for every iteration.
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targets for the base model.
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Ground-truth step on the unprepared model.
        step_model(model, input, target, accelerator)
        # "Gradient accumulation" (a noop here).
        if iteration % 2 == 0:
            # Accumulate grads locally.
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads.
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads are always in sync.
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration.
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """On a distributed setup, `no_sync` must actually suppress grad syncing.

    Gradients of the prepared copy should match the reference model only on
    iterations where the step happened outside the `no_sync` context.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch for every iteration.
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targets for the base model.
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Ground-truth step on the unprepared model.
        step_model(model, input, target, accelerator)
        # "Gradient accumulation": skip syncing on even iterations.
        if iteration % 2 == 0:
            # Accumulate grads locally.
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads.
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0).
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should NOT be in sync.
                assert not torch.allclose(
                    param.grad, ddp_param.grad
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync.
                assert torch.allclose(
                    param.grad, ddp_param.grad
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration.
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` syncs grads only on accumulation boundaries.

    With `gradient_accumulation_steps=2`, gradients of the prepared copy should
    match the reference model on every second iteration (and on the final one).
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model.
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Ground-truth step on the unprepared model (manual loss scaling).
        step_model(model, input, target, accelerator, False)
        # Step the copy under the accumulation context manager.
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync on accumulation boundaries.
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync.
                assert torch.allclose(
                    param.grad, ddp_param.grad
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should NOT be in sync.
                assert not torch.allclose(
                    param.grad, ddp_param.grad
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration.
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Check accumulation with an optimizer and LR scheduler in the loop.

    The reference side steps its scheduler manually only on accumulation
    boundaries (once per process unless `split_batches`), while the prepared
    side steps under `accelerator.accumulate`; learning rates must stay equal.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model.
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Ground-truth step on the unprepared model (manual loss scaling).
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        # Step the reference scheduler only on accumulation boundaries.
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process mirrors the prepared scheduler.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under the wrapper.
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same.
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration.
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """Check `GradientState` tracking across nested/interrupted dataloaders.

    The active dataloader must always be the innermost one being iterated,
    `end_of_dataloader` must flip only on each loader's last batch, and the
    active dataloader must be reset to None once iteration finishes.
    """
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration: the second loader becomes the active one.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Dispatch the gradient-sync/accumulation tests based on distributed setup.

    Only local process 0 prints progress banners; the tests themselves run on
    every process.
    """
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                # (False, False) was already covered just above.
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs).

    NOTE(review): `index` is presumably the spawned process index supplied by
    xla_spawn and is intentionally unused here — confirm against the launcher.
    """
    main()
# Script entry point when launched directly (also reached via the TPU spawn hook).
if __name__ == "__main__":
    main()
# --- (extraction artifact: table-row separator between concatenated sources removed) ---
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original ViT-MAE checkpoint key to its HF Transformers name.

    Each substring match rewrites `name` in place, so later checks run against
    the already-rewritten key (order of the checks therefore matters, e.g.
    "decoder_blocks" must be handled before the generic "blocks").
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original ViT-MAE state dict into HF Transformers layout.

    Fused `qkv` projections are split into separate query/key/value tensors
    (rows `[:dim]`, `[dim:2*dim]`, `[-dim:]`); every other key is renamed via
    `rename_key`. The dict is modified and returned in place.

    Args:
        orig_state_dict: mapping of original checkpoint keys to tensors.
        config: model config; only `hidden_size` and `decoder_hidden_size`
            are read here.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an original ViT-MAE checkpoint, convert it, verify, and save.

    Args:
        checkpoint_url: URL of the original MAE checkpoint ("large"/"huge" in
            the URL selects the matching config; anything else uses the base).
        pytorch_dump_folder_path: directory to save the converted model and
            image processor into.

    Raises:
        AssertionError: if the converted model's logits on a reference image
            do not match the expected slice within 1e-4.
    """
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # Reference image used to sanity-check the conversion.
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    # Forward pass (seeded: MAE masking is random).
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )
    # Verify logits against the reference values.
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI wrapper: parse the checkpoint URL and output directory, then convert.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
# --- (extraction artifact: table-row separator removed) ---
# (extraction artifact: dataset-viewer boilerplate, kept as comments)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.