"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ : Dict = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = ["OwlViTFeatureExtractor"]
lowerCamelCase__ : List[str] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
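# ---------------------------------------------------------------------------
# Minimal usage sketch of the public API this init exposes. Illustration only,
# not part of the module: it assumes `torch`, `Pillow`, `requests`, network
# access, and the "google/owlvit-base-patch32" checkpoint.
# ---------------------------------------------------------------------------
import requests
import torch
from PIL import Image

from transformers import OwlViTForObjectDetection, OwlViTProcessor  # resolved lazily via _LazyModule

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert the raw logits into thresholded boxes/scores/labels per image.
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)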
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[int] = StableDiffusionInstructPixaPixPipeline
_SCREAMING_SNAKE_CASE :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
_SCREAMING_SNAKE_CASE :Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_SCREAMING_SNAKE_CASE :int = IMAGE_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE :List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
SCREAMING_SNAKE_CASE__ : str = CLIPTextModel(_a )
SCREAMING_SNAKE_CASE__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _a , _a=0 ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Dict = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" )
if str(_a ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(_a )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=_a ).manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : str = sd_pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : int = """french fries"""
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(**_a , negative_prompt=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : List[str] = [inputs["""prompt"""]] * 2
SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.from_numpy(_a ).unsqueeze(0 ).to(_a )
SCREAMING_SNAKE_CASE__ : Any = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ : Any = image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ : List[str] = image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : List[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = [round(_a , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(_a ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Tuple = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = VaeImageProcessor(do_resize=_a , do_normalize=_a )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : str = pipe(**self.get_dummy_inputs_by_type(_a , input_image_type="""pt""" ) )[0]
SCREAMING_SNAKE_CASE__ : Tuple = components["""vae"""]
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_inputs_by_type(_a , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE__ : Any = vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE__ : Any = pipe(**_a )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(_a , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _a=0 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : str = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
SCREAMING_SNAKE_CASE__ : Dict = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_inputs()
SCREAMING_SNAKE_CASE__ : str = pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : str = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Any = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
def callback_fn(_a , _a , _a ) -> None:
SCREAMING_SNAKE_CASE__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : str = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
SCREAMING_SNAKE_CASE__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : str = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_inputs()
pipe(**_a , callback=_a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> List[str]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : str = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Any = pipe(**_a )
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["""image"""].resize((504, 504) )
SCREAMING_SNAKE_CASE__ : str = """timbrooks/instruct-pix2pix"""
SCREAMING_SNAKE_CASE__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0]
SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
# Dummy objects for environments where Flax and/or transformers are not
# installed. The class names below follow diffusers' autogenerated
# `dummy_flax_and_transformers_objects.py`; the original names were anonymized
# in this dump, so treat them as a best-effort restoration.
from ..utils import DummyObject, requires_backends


class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
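# ---------------------------------------------------------------------------
# Illustration of how these placeholders behave (not part of the module):
# `requires_backends` raises an ImportError naming the missing packages, so
# constructing a Flax pipeline without `flax`/`transformers` installed fails
# with a clear install hint instead of an opaque AttributeError. When both
# backends are installed, construction simply succeeds.
# ---------------------------------------------------------------------------
from diffusers.utils import DummyObject, requires_backends


class ExampleFlaxPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])


try:
    ExampleFlaxPipeline()
except ImportError as err:
    print(err)  # message tells the user which packages to install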
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""
Project Euler Problem 75: https://projecteuler.net/problem=75

Count the perimeters L <= 1,500,000 for which exactly one integer-sided right
triangle with perimeter L exists. Primitive Pythagorean triples are generated
with Euclid's formula (a = m^2 - n^2, b = 2mn, c = m^2 + n^2 for coprime m > n
of opposite parity), giving perimeter 2m(m + n); every non-primitive triple is
an integer multiple of a primitive one.
"""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    # The smallest perimeter produced by a given m is 2m(m + 1), with n = 1.
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # Step of 2 keeps n of opposite parity to m; gcd filters non-coprime pairs.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # Count the primitive triple and all of its integer multiples.
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
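# Sanity check using the values quoted in the problem statement: 12, 24, 30,
# 36, 40 and 48 are the only perimeters up to 48 that admit exactly one
# integer-sided right triangle, so solution(48) returns 6.
assert solution(48) == 6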
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The axis layout of `pixel_values` differs depending on the task head.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A processor with OCR enabled would overwrite the dummy words and boxes.
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # Generate dummy images; the batch axis was already fixed above
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
"""
Find a root of a continuous function on an interval [a, b] over which the
function changes sign, using the bisection method.
"""
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is a root of the function
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # if neither endpoint is a root and both values share a sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
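# Worked example: f(2) = 2**3 - 2*2 - 5 = -1 and f(3) = 3**3 - 2*3 - 5 = 16,
# so f changes sign on [2, 3] and bisection converges to the real root of
# x^3 - 2x - 5, roughly 2.0945515 (illustration only).
assert round(bisection(f, 2, 3), 4) == 2.0946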
import torch


def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import (
    BaseOutput,
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_onnx_available,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            Denoised images of length `batch_size`.
        nsfw_content_detected (`List[bool]`):
            Flags denoting whether the corresponding image likely represents
            "not-safe-for-work" content, or `None` if safety checking was skipped.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline

try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax-based Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
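# ---------------------------------------------------------------------------
# Minimal usage sketch of one pipeline exported above (illustration only;
# assumes `torch`, a CUDA device, and network access to the
# "runwayml/stable-diffusion-v1-5" checkpoint).
# ---------------------------------------------------------------------------
import torch

from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
image.save("astronaut.png")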
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
SCREAMING_SNAKE_CASE__ = (wi_a, wi_a)
else:
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def A ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(variables["""target"""] )
SCREAMING_SNAKE_CASE__ = {"""/""".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
SCREAMING_SNAKE_CASE__ = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , snake_case__ )
SCREAMING_SNAKE_CASE__ = collections.OrderedDict()
# Shared embeddings.
SCREAMING_SNAKE_CASE__ = old["""token_embedder/embedding"""]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" )
SCREAMING_SNAKE_CASE__ = layer_norm
SCREAMING_SNAKE_CASE__ = k.T
SCREAMING_SNAKE_CASE__ = o.T
SCREAMING_SNAKE_CASE__ = q.T
SCREAMING_SNAKE_CASE__ = v.T
# Block i, layer 1 (MLP).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ )
SCREAMING_SNAKE_CASE__ = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = wi[0].T
SCREAMING_SNAKE_CASE__ = wi[1].T
else:
SCREAMING_SNAKE_CASE__ = wi.T
SCREAMING_SNAKE_CASE__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(
snake_case__ , snake_case__ , """encoder""" ).T
SCREAMING_SNAKE_CASE__ = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(
snake_case__ , 0 , """encoder""" ).T
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(
snake_case__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" )
SCREAMING_SNAKE_CASE__ = layer_norm
SCREAMING_SNAKE_CASE__ = k.T
SCREAMING_SNAKE_CASE__ = o.T
SCREAMING_SNAKE_CASE__ = q.T
SCREAMING_SNAKE_CASE__ = v.T
# Block i, layer 1 (Cross Attention).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" )
SCREAMING_SNAKE_CASE__ = layer_norm
SCREAMING_SNAKE_CASE__ = k.T
SCREAMING_SNAKE_CASE__ = o.T
SCREAMING_SNAKE_CASE__ = q.T
SCREAMING_SNAKE_CASE__ = v.T
# Block i, layer 2 (MLP).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ )
SCREAMING_SNAKE_CASE__ = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = wi[0].T
SCREAMING_SNAKE_CASE__ = wi[1].T
else:
SCREAMING_SNAKE_CASE__ = wi.T
SCREAMING_SNAKE_CASE__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T
SCREAMING_SNAKE_CASE__ = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
SCREAMING_SNAKE_CASE__ = old["""decoder/logits_dense/kernel"""].T
return new
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""]
return state_dict
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = checkpoints.load_tax_checkpoint(snake_case__ )
SCREAMING_SNAKE_CASE__ = convert_tax_to_pytorch(
snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ )
SCREAMING_SNAKE_CASE__ = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = MTaConfig.from_json_file(snake_case__ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
SCREAMING_SNAKE_CASE__ = UMTaEncoderModel(snake_case__ )
else:
SCREAMING_SNAKE_CASE__ = UMTaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("""Done""" )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
A_ : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
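# ---------------------------------------------------------------------------
# Shape bookkeeping behind the reshape + transpose in t5x_attention_lookup
# (illustration with made-up dimensions): T5X stores an attention kernel as
# (d_model, num_heads, d_head), while a PyTorch nn.Linear weight is
# (out_features, in_features) = (num_heads * d_head, d_model).
# ---------------------------------------------------------------------------
import numpy as np

d_model, num_heads, d_head = 512, 8, 64
k_t5x = np.zeros((d_model, num_heads, d_head))
k_hf = k_t5x.reshape(d_model, num_heads * d_head).T
assert k_hf.shape == (num_heads * d_head, d_model)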
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
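# ---------------------------------------------------------------------------
# Sketch of what the class under test wraps (illustration only; assumes
# network access to the "facebook/rag-token-nq" checkpoint): RagTokenizer
# pairs a DPR question-encoder tokenizer with a BART generator tokenizer and
# delegates __call__ to the question-encoder side by default.
# ---------------------------------------------------------------------------
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = tokenizer(["who got the first nobel prize in physics"], return_tensors="pt")
print(batch["input_ids"].shape)  # token ids produced by the question-encoder side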
"""Benchmark two ways of counting the set bits (population count) of an integer."""
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count set bits by repeatedly clearing the lowest set bit.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(-1)
    Traceback (most recent call last):
        ...
    ValueError: the value of input must not be negative
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count set bits by testing the low bit and shifting right.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark both implementations on a few sample inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
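    # Added sanity check (illustrative): Python's bin() gives a reference
    # population count, so both implementations can be verified against it.
    for n in (25, 37, 58, 0):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")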
| 234
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
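
if __name__ == "__main__":
    # Minimal standalone sketch (not part of the test suite): driving the scheduler
    # directly with a stand-in "model" that predicts zeros, mirroring the loops above.
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(model_input)  # a real pipeline would call a UNet here
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(f"final sample mean abs: {sample.abs().mean().item():.4f}")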
| 210
| 0
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure the model is cached first.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
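
if __name__ == "__main__":
    # Minimal sketch of the registration pattern exercised above, runnable outside the
    # test harness (NewModelConfig/TFNewModel are the toy classes defined in this file).
    AutoConfig.register("new-model", NewModelConfig)
    TFAutoModel.register(NewModelConfig, TFNewModel)
    config = NewModelConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
    model = TFAutoModel.from_config(config)
    print(type(model).__name__)  # -> TFNewModel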
| 232
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
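
if __name__ == "__main__":
    # Illustrative JIT inference sketch mirroring the tests above
    # (downloads the google/vit-base-patch16-224 checkpoint when run).
    model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")

    @jax.jit
    def forward(pixel_values):
        return model(pixel_values=pixel_values)

    outputs = forward(np.ones((1, 3, 224, 224), dtype=np.float32))
    print(outputs.last_hidden_state.shape)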
| 232
| 1
|
from ..utils import DummyObject, requires_backends


# NOTE: the original pipeline class names are not recoverable from this excerpt;
# the names below are placeholders that keep the four dummies distinct.
class FlaxDummyPipeline0(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyPipeline1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyPipeline2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyPipeline3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
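
if __name__ == "__main__":
    # Illustrative sketch: when `flax` or `transformers` is missing, instantiating a
    # dummy raises an ImportError with install instructions; otherwise it is a no-op.
    # (This module normally lives inside a package, so run this only as a sketch.)
    try:
        FlaxDummyPipeline0()
        print("backends available; dummy instantiation is a no-op")
    except ImportError as err:
        print(err)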
| 8
|
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 8
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase_ = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 239
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
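
# Example invocation (illustrative; the script filename and all paths are placeholders):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model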
| 239
| 1
|
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
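
    # Another illustrative use: approximate sqrt(2) as the root of x**2 - 2 on [0, 2].
    print(bisection(lambda x: x * x - 2, 0, 2))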
| 345
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
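# A minimal standalone sketch of the same inference path outside the test
# harness. The "microsoft/resnet-50" checkpoint id is an assumption on my part;
# everything else mirrors the integration test above.
#
#   from transformers import AutoImageProcessor, TFResNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs).logits
#   print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])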
| 366
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class TextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__( self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ):
        super().__init__()
        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward( self, input_ids, prefix_embeds, attention_mask=None, labels=None, ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam( self, input_embeds=None, device=None, input_ids=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None, ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
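# A minimal usage sketch for the decoder above. All sizes and the eos id are
# illustrative assumptions; `clip_features` stands in for real CLIP embeddings.
#
#   decoder = TextDecoder(prefix_length=77, prefix_inner_dim=768)
#   clip_features = torch.randn(2, 77, 768)  # (batch, prefix_length, prefix_inner_dim)
#   tokens, lengths = decoder.generate_captions(clip_features, eos_token_id=50256, device="cpu")
#   print(tokens.shape, lengths)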
| 1
| 0
|
from __future__ import annotations
graph: dict[str, list[str]] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    """Graph is implemented as a dictionary of adjacency lists."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 184
|
def gcd(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
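# Worked example (plain arithmetic, no external assumptions):
# gcd(7, 25) == 1 and 7 * 18 = 126 = 5 * 25 + 1, so the inverse of 7 mod 25 is 18.
if __name__ == "__main__":
    assert gcd(7, 25) == 1
    assert find_mod_inverse(7, 25) == 18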
| 184
| 1
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ : Any =direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
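# Typical invocations from the root of the repo, per the comment at the top of
# this script:
#
#   python utils/check_task_guides.py                      # only check, fail on mismatch
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the task guides in place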
| 363
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
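# A real-world sketch of the decorator under test; the training-loop body is
# hypothetical, only the decorator semantics come from `accelerate`:
#
#   @find_executable_batch_size(starting_batch_size=64)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train()  # on CUDA OOM, retries with 32, 16, ... until the loop fits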
| 162
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : List[Any] = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=50400, n_ctx=2048, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__( self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
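# A minimal export-preparation sketch. The checkpoint id comes from the archive
# map above; TensorType is already imported at the top of this module, and the
# rest is an illustrative assumption rather than a prescribed workflow:
#
#   from transformers import AutoTokenizer
#
#   config = CodeGenConfig()
#   onnx_config = CodeGenOnnxConfig(config, task="default")
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   print(list(dummy.keys()))  # ['input_ids', 'attention_mask']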
| 232
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
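# End-to-end sketch with published weights rather than the tiny test components
# (downloads several GB; the checkpoint id is my assumption of the community
# release, the rest mirrors the pipeline call above):
#
#   pipe = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   out = pipe("a photo of a red horse", num_inference_steps=25)
#   image_embeds, negative_embeds = out.image_embeds, out.negative_image_embeds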
| 232
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360
|
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
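# Example invocation; every file name below is a placeholder for your own
# checkpoint, YAML config, and output directory:
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors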
| 133
| 0
|
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}" )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 239
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
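# Quick fill-mask sketch mirroring the integration test above, via the pipeline
# API instead of a manual forward pass (same checkpoint, output assumed to rank
# "capital" first as the test asserts):
#
#   from transformers import pipeline
#
#   fill = pipeline("fill-mask", model="uw-madison/nystromformer-512")
#   print(fill("the [MASK] of Belgium is Brussels")[0]["token_str"])  # "capital"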
| 239
| 1
|
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__( self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
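# A minimal round-trip sketch (requires `jieba`; the checkpoint id comes from
# the pretrained map above, the sample sentence is arbitrary):
#
#   tok = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tok.encode("今天天气真好")
#   print(tok.decode(ids))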
| 363
|
'''simple docstring'''
import math


def jump_search(arr: list, x: int) -> int:
    """Return the index of ``x`` in the sorted list ``arr``, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text``, using a Rabin-Karp rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
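# --- Added sketch: one rolling-hash update spelled out for a length-2 pattern
# --- (characters chosen for illustration). Removing the leading character and
# --- appending the next one reproduces the direct hash of the new window:
def _demo_rolling_hash() -> None:
    h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
    h_bc = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    assert h_bc == (ord("b") * alphabet_size + ord("c")) % modulus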
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # NOTE: the exact integer bit widths were masked in the source; int32
            # ids with an int64 label is a reconstruction following the upstream example.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
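# --- Added sketch: wiring the pieces above together (the data path and the
# --- tokenizer checkpoint are placeholders, not from the original source):
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = HansDataset(
#     data_dir="/path/to/hans", tokenizer=tokenizer, task="hans", max_seq_length=128
# )
# print(len(dataset), dataset.get_labels())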
import functools
def mincost_tickets(days, costs) -> int:
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
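# --- Added sketch: a worked example for mincost_tickets (values taken from the
# --- classic statement of the problem, not from this file). A 7-day pass on
# --- day 1 (cost 7) plus 1-day passes on days 8 and 20 (cost 2 each) is
# --- optimal: 7 + 2 + 2 = 11.
def _demo_mincost_tickets() -> None:
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 25]) == 11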
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
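# --- Added note: the rope_scaling dict exercised above is the same shape that
# --- LlamaConfig accepts, e.g. config.rope_scaling = {"type": "linear",
# --- "factor": 10.0} (factor value illustrative). "linear" rescales every
# --- position id, while "dynamic" only changes the embeddings once an input
# --- exceeds max_position_embeddings.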
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
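# --- Added sketch: a tiny 2x2 grid for solve_maze (values illustrative;
# --- 0 = open cell, 1 = blocked). The only path goes down, then right.
def _demo_solve_maze() -> None:
    maze = [
        [0, 1],
        [0, 0],
    ]
    assert solve_maze(maze)  # prints the solution matrix and returns True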
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
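# --- Added sketch: a worked Newton-Laplace example (fluid properties are
# --- textbook approximations, not values from this file). Water with bulk
# --- modulus K ~ 2.15e9 Pa and density rho ~ 998 kg/m^3 gives
# --- c = sqrt(K / rho) ~ 1468 m/s.
def _demo_speed_of_sound() -> None:
    c = speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9)
    assert abs(c - 1468.0) < 1.0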
import argparse
from collections import defaultdict
import yaml
_SCREAMING_SNAKE_CASE = """docs/source/en/_toctree.yml"""
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : List[Any] = defaultdict(__a )
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(__a )
snake_case_ : Any = new_doc_list
snake_case_ : str = [key for key, value in counts.items() if value > 1]
snake_case_ : Any = []
for duplicate_key in duplicates:
snake_case_ : Any = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
snake_case_ : str = sorted(__a , key=lambda __a : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__a ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(__a )
# Sort
return overview_doc
def SCREAMING_SNAKE_CASE__ ( __a=False ):
with open(__a , encoding='utf-8' ) as f:
snake_case_ : int = yaml.safe_load(f.read() )
# Get to the API doc
snake_case_ : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case_ : Dict = content[api_idx]['sections']
# Then to the model doc
snake_case_ : Tuple = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
snake_case_ : Union[str, Any] = api_doc[scheduler_idx]['sections']
snake_case_ : Optional[Any] = clean_doc_toc(__a )
snake_case_ : int = False
if new_scheduler_doc != scheduler_doc:
snake_case_ : int = True
if overwrite:
snake_case_ : Union[str, Any] = new_scheduler_doc
if diff:
if overwrite:
snake_case_ : Optional[int] = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def SCREAMING_SNAKE_CASE__ ( __a=False ):
with open(__a , encoding='utf-8' ) as f:
snake_case_ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
snake_case_ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case_ : str = content[api_idx]['sections']
# Then to the model doc
snake_case_ : List[Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
snake_case_ : Dict = False
snake_case_ : Union[str, Any] = api_doc[pipeline_idx]['sections']
snake_case_ : Union[str, Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
snake_case_ : Optional[Any] = pipeline_doc['section']
snake_case_ : Optional[int] = clean_doc_toc(__a )
if overwrite:
snake_case_ : Tuple = new_sub_pipeline_doc
new_pipeline_docs.append(__a )
# sort overall pipeline doc
snake_case_ : Optional[Any] = clean_doc_toc(__a )
if new_pipeline_docs != pipeline_docs:
snake_case_ : List[str] = True
if overwrite:
snake_case_ : List[str] = new_pipeline_docs
if diff:
if overwrite:
snake_case_ : List[Any] = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
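# --- Added note (illustrative invocations; the script path assumes the
# --- repository layout this checker is written for):
#   python utils/check_doc_toc.py                      # raise if the ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place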
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dict returned by load_iris() into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Demonstrate the XGBoost classifier on the Iris dataset."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
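# --- Added sketch: typical use of the context manager above (the sleep stands
# --- in for any long-running work):
def _demo_hidden_cursor() -> None:
    import time

    with hidden_cursor():
        time.sleep(2)  # cursor is hidden while this runs, restored afterwards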
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
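# Note: a "*" in the mapped keys above is a placeholder for the encoder layer
# index; recursively_load_weights() below substitutes the index parsed from the
# fairseq parameter name before copying each tensor.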
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint into the original WavLM implementation
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
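# Example invocation (the checkpoint and output paths are placeholders, not
# part of the original script):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted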
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
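# The import structure below lets the package import succeed even when an
# optional backend (PyTorch or TensorFlow) is missing: each backend's symbols
# are only registered if the dependency check passes, and _LazyModule defers
# the actual submodule imports until an attribute is first accessed.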
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Convert fairseq BART checkpoints (bart.large, bart.large.mnli, bart.large.cnn, bart_xsum) to Hugging Face Transformers."""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint into the Hugging Face model structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens)[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
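# Example invocation (the output path is a placeholder, not part of the
# original script):
#   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn-converted \
#       --hf_config facebook/bart-large-cnn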
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
"""SEW model configuration."""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a SEW model."""

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, hidden_act="gelu",
        hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10,
        mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10,
        mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256,
        pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
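# A minimal usage sketch (illustrative, not part of the original file):
#
#     config = SEWConfig()             # defaults shown above
#     config.inputs_to_logits_ratio    # -> 320, the product of the conv strides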
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: return the maximum total value obtainable from items
    index..number_of_items - 1 without exceeding the remaining capacity max_weight.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
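# Note: this top-down recursion explores both the "skip" and "take" branch for
# every item, so it runs in O(2**number_of_items) time; memoizing on the pair
# (index, max_weight), e.g. with functools.lru_cache, reduces it to
# O(number_of_items * max_weight).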
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1],
        is_training=True, use_labels=True, hidden_act="relu",
        num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False,
        patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5,
        is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
        encoder_stride=8, out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads,
            window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, patch_size=2, max_length=24, num_mel_bins=16,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, scope=None,
        frequency_stride=2, time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    file = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(file)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
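# A minimal end-to-end inference sketch for the checkpoint exercised above (kept as a
# comment so the test module stays import-safe; it assumes Hub access and substitutes
# one second of random 16 kHz audio for a real recording):
#
#     feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     waveform = torch.randn(16000).numpy()
#     inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[int(logits.argmax(-1))])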
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
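# A small usage sketch of the ONNX config above (commented out since this is a library
# module; it assumes the MarianOnnxConfig name defined here and network access for the
# tokenizer download):
#
#     from transformers import AutoTokenizer
#     onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
#     tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # dummy contains input_ids, attention_mask and decoder_* tensors
#     # (plus past_key_values when use_past is enabled)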
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
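# The script can equally be driven programmatically; the dataset name and paths below
# are hypothetical, and a single JSON file of arguments also works (see the parser in main()):
#
#     import sys
#     sys.argv = [
#         "run_image_classification.py",
#         "--dataset_name", "beans",
#         "--output_dir", "./vit-beans",
#         "--do_train", "--do_eval",
#     ]
#     main()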
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
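# Reloading the published checkpoint for a quick smoke test (commented out; the repo id
# comes from the note at the top of this script, and availability on the Hub is assumed):
#
#     tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
#     ids = tok(["Making tiny model"], return_tensors="pt")
#     print(tok.decode(model.generate(**ids, max_length=8)[0], skip_special_tokens=True))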
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
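# Worked example of the offset-based rename above (hypothetical checkpoint key, offset 1):
#
#     replace_key_with_offset("network.1.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#     # -> "network.block.0.0.output.conv1.weight"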
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "s12":
__A = [2, 2, 6, 2]
__A = [64, 1_28, 3_20, 5_12]
__A = 4.0
__A = 0.9
elif size == "s24":
__A = [4, 4, 12, 4]
__A = [64, 1_28, 3_20, 5_12]
__A = 4.0
__A = 0.9
elif size == "s36":
__A = [6, 6, 18, 6]
__A = [64, 1_28, 3_20, 5_12]
__A = 4.0
__A = 1e-6
__A = 0.9
elif size == "m36":
__A = [6, 6, 18, 6]
__A = [96, 1_92, 3_84, 7_68]
__A = 4.0
__A = 1e-6
__A = 0.95
elif size == "m48":
__A = [8, 8, 24, 8]
__A = [96, 1_92, 3_84, 7_68]
__A = 4.0
__A = 1e-6
__A = 0.95
else:
raise ValueError(F"""Size {size} not supported""" )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
snake_case__ : Tuple = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
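# Typical invocation (the script filename is assumed here, and the checkpoint path is
# hypothetical; it must point to an original PoolFormer .pth file):
#
#     python convert_poolformer_original_to_pytorch.py --model_name poolformer_s12 \
#         --checkpoint_path ./poolformer_s12.pth --pytorch_dump_folder_path ./poolformer_s12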
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
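# A small composition sketch (commented out; sub-configs are left at their defaults):
#
#     config = Pix2StructConfig()
#     assert config.text_config.hidden_size == 768
#     rebuilt = Pix2StructConfig.from_text_vision_configs(config.text_config, config.vision_config)
#     assert rebuilt.to_dict()["vision_config"]["hidden_size"] == 768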
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def _a ( UpperCAmelCase , UpperCAmelCase="cpu" ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ : str = model_dict[model_name].from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : int = tokenizer_dict[model_name].from_pretrained(UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Any = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
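# Typical invocation (the script filename is assumed; the export traces beam search and
# cross-checks PyTorch against ONNX Runtime before reporting success):
#
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#         --output_file_path bart.onnx --device cpu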
def actual_power(a: int, b: int):
    """
    Function using divide and conquer to calculate a^b. It only works for integer a, b.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
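# Worked example: power(-2, -3) = 1 / actual_power(-2, -3) = 1 / (-8) = -0.125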
"""Implementation of the Jaro-Winkler string similarity."""


def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler distance is a string metric measuring an edit distance between two
    sequences. The output value is between 0.0 and 1.0.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
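# Worked example on ("hello", "world"): only "l" is matched within the search window in
# each direction, so match_count = 1 and transpositions = 0, giving
# jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667, with no common prefix to boost the score.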
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ''' a future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
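# Minimal usage sketch for the dataset class above (shown here under the
# upstream names SquadDataTrainingArguments / SquadDataset; the data dir and
# tokenizer checkpoint are hypothetical placeholders):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   inputs = train_dataset[0]  # dict of input_ids / attention_mask / ... tensors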
| 6
| 1
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _lowerCAmelCase ( lowerCAmelCase_ :Dict , lowerCAmelCase_ :Tuple )->str:
'''simple docstring'''
snake_case_ = k_size // 2
snake_case_ , snake_case_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
snake_case_ = 1 / (2 * pi * sigma) * exp(-(square(lowerCAmelCase_ ) + square(lowerCAmelCase_ )) / (2 * square(lowerCAmelCase_ )) )
return g
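# Quick numeric check of the kernel formula above (a sketch, not used by the
# pipeline): for k_size=3 and sigma=1 the center weight is 1/(2*pi) ~ 0.159,
# the four edge neighbours are exp(-0.5)/(2*pi) ~ 0.097, and the corners are
# exp(-1)/(2*pi) ~ 0.059. The weights sum to ~0.78, i.e. the kernel is not
# normalized to 1, so the filtered image comes out slightly darker than the input.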
def _lowerCAmelCase ( lowerCAmelCase_ :Tuple , lowerCAmelCase_ :str , lowerCAmelCase_ :Any )->int:
'''simple docstring'''
snake_case_ , snake_case_ = image.shape[0], image.shape[1]
# dst image height and width
snake_case_ = height - k_size + 1
snake_case_ = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
snake_case_ = zeros((dst_height * dst_width, k_size * k_size) )
snake_case_ = 0
for i, j in product(range(lowerCAmelCase_ ) , range(lowerCAmelCase_ ) ):
snake_case_ = ravel(image[i : i + k_size, j : j + k_size] )
snake_case_ = window
row += 1
# turn the kernel into shape(k*k, 1)
snake_case_ = gen_gaussian_kernel(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = ravel(lowerCAmelCase_ )
# reshape and get the dst image
snake_case_ = dot(lowerCAmelCase_ , lowerCAmelCase_ ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ).astype(lowerCAmelCase_ )
return dst
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE :Dict = imread(R'''../image_data/lena.jpg''')
# turn image in gray scale value
SCREAMING_SNAKE_CASE :Any = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
SCREAMING_SNAKE_CASE :Optional[Any] = gaussian_filter(gray, 3, sigma=1)
SCREAMING_SNAKE_CASE :List[str] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 159
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCAmelCase_ :List[Any] )->int:
'''simple docstring'''
print("Loading config file..." )
def flatten_yaml_as_dict(lowerCAmelCase_ :List[Any] , lowerCAmelCase_ :Optional[int]="" , lowerCAmelCase_ :int="." ):
snake_case_ = []
for k, v in d.items():
snake_case_ = parent_key + sep + k if parent_key else k
if isinstance(lowerCAmelCase_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowerCAmelCase_ , lowerCAmelCase_ , sep=lowerCAmelCase_ ).items() )
else:
items.append((new_key, v) )
return dict(lowerCAmelCase_ )
snake_case_ = argparse.Namespace()
with open(lowerCAmelCase_ , "r" ) as yaml_file:
try:
snake_case_ = yaml.load(lowerCAmelCase_ , Loader=yaml.FullLoader )
snake_case_ = flatten_yaml_as_dict(lowerCAmelCase_ )
for k, v in flat_cfg.items():
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(lowerCAmelCase_ , str(lowerCAmelCase_ ) ) )
return config
def _lowerCAmelCase ( lowerCAmelCase_ :List[str] , lowerCAmelCase_ :Tuple )->Union[str, Any]:
'''simple docstring'''
snake_case_ = MobileViTVaConfig()
snake_case_ = False
# dataset
if task_name.startswith("imagenet1k_" ):
snake_case_ = 1_000
if int(task_name.strip().split("_" )[-1] ) == 384:
snake_case_ = 384
else:
snake_case_ = 256
snake_case_ = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
snake_case_ = 21_000
if int(task_name.strip().split("_" )[-1] ) == 384:
snake_case_ = 384
else:
snake_case_ = 256
snake_case_ = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
snake_case_ = 151
snake_case_ = 512
snake_case_ = "ade20k-id2label.json"
snake_case_ = True
elif task_name.startswith("voc_" ):
snake_case_ = 21
snake_case_ = 512
snake_case_ = "pascal-voc-id2label.json"
snake_case_ = True
# orig_config
snake_case_ = load_orig_config_file(lowerCAmelCase_ )
assert getattr(lowerCAmelCase_ , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
snake_case_ = getattr(lowerCAmelCase_ , "model.classification.mitv2.width_multiplier" , 1.0 )
assert (
getattr(lowerCAmelCase_ , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
snake_case_ = getattr(lowerCAmelCase_ , "model.classification.activation.name" , "swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.output_stride" , 16 )
if "_deeplabv3" in task_name:
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
# id2label
snake_case_ = "huggingface/label-files"
snake_case_ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowerCAmelCase_ :Any , lowerCAmelCase_ :Optional[Any] , lowerCAmelCase_ :Optional[Any] )->Optional[Any]:
'''simple docstring'''
snake_case_ = dct.pop(lowerCAmelCase_ )
snake_case_ = val
def _lowerCAmelCase ( lowerCAmelCase_ :Dict , lowerCAmelCase_ :int=False )->Dict:
'''simple docstring'''
if base_model:
snake_case_ = ""
else:
snake_case_ = "mobilevitv2."
snake_case_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
snake_case_ = k[8:]
else:
snake_case_ = k
if ".block." in k:
snake_case_ = k_new.replace(".block." , "." )
if ".conv." in k:
snake_case_ = k_new.replace(".conv." , ".convolution." )
if ".norm." in k:
snake_case_ = k_new.replace(".norm." , ".normalization." )
if "conv_1." in k:
snake_case_ = k_new.replace("conv_1." , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
snake_case_ = k_new.replace(".exp_1x1." , ".expand_1x1." )
if ".red_1x1." in k:
snake_case_ = k_new.replace(".red_1x1." , ".reduce_1x1." )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
snake_case_ = [0, 1]
elif i == 4:
snake_case_ = [0, 1, 2, 3]
elif i == 5:
snake_case_ = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
snake_case_ = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
snake_case_ = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
snake_case_ = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
if "pre_norm_attn.1." in k:
snake_case_ = k_new.replace("pre_norm_attn.1." , "attention." )
if "pre_norm_ffn.0." in k:
snake_case_ = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
if "pre_norm_ffn.1." in k:
snake_case_ = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
if "pre_norm_ffn.3." in k:
snake_case_ = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
if "classifier.1." in k:
snake_case_ = k_new.replace("classifier.1." , "classifier." )
if "seg_head." in k:
snake_case_ = k_new.replace("seg_head." , "segmentation_head." )
if ".aspp_layer." in k:
snake_case_ = k_new.replace(".aspp_layer." , "." )
if ".aspp_pool." in k:
snake_case_ = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
def _lowerCAmelCase ( lowerCAmelCase_ :Optional[Any] )->Optional[int]:
'''simple docstring'''
snake_case_ = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(lowerCAmelCase_ )
for k in keys_to_ignore:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( )->List[Any]:
'''simple docstring'''
snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
snake_case_ = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Dict , lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Dict )->Dict:
'''simple docstring'''
snake_case_ = get_mobilevitva_config(lowerCAmelCase_ , lowerCAmelCase_ )
# load original state_dict
snake_case_ = torch.load(lowerCAmelCase_ , map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
snake_case_ = MobileViTVaForSemanticSegmentation(lowerCAmelCase_ ).eval()
snake_case_ = False
else:
snake_case_ = MobileViTVaForImageClassification(lowerCAmelCase_ ).eval()
snake_case_ = False
    # remove and rename some keys of the original state dict before loading it
snake_case_ = checkpoint
remove_unused_keys(lowerCAmelCase_ )
snake_case_ = create_rename_keys(lowerCAmelCase_ , base_model=lowerCAmelCase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# load modified state_dict
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
snake_case_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
snake_case_ = image_processor(images=prepare_img() , return_tensors="pt" )
snake_case_ = model(**lowerCAmelCase_ )
# verify classification model
if task_name.startswith("imagenet" ):
snake_case_ = outputs.logits
snake_case_ = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
snake_case_ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
SCREAMING_SNAKE_CASE :int = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
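# Example invocation (a sketch; the script name and the checkpoint/config paths
# are hypothetical placeholders):
#
#   python convert_mobilevitv2_original_to_hf.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256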
| 159
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any]):
_A : int = tempfile.mkdtemp()
_A : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
_A : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
_A : List[str] = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
'do_convert_rgb': True,
}
_A : List[str] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[Any]):
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE)
def A ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[int]):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE)
def A ( self : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any]):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE)
def A ( self : Optional[int]):
shutil.rmtree(self.tmpdirname)
def A ( self : Optional[Any]):
_A : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
_A : Dict = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs]
return image_inputs
def A ( self : Optional[int]):
_A : int = self.get_tokenizer()
_A : Optional[int] = self.get_rust_tokenizer()
_A : Union[str, Any] = self.get_image_processor()
_A : Dict = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE)
processor_slow.save_pretrained(self.tmpdirname)
_A : str = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE)
_A : Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE)
processor_fast.save_pretrained(self.tmpdirname)
_A : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE)
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE)
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE)
def A ( self : List[str]):
_A : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_A : List[str] = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)')
_A : Tuple = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE)
def A ( self : Optional[int]):
_A : Union[str, Any] = self.get_image_processor()
_A : Optional[Any] = self.get_tokenizer()
_A : Dict = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE)
_A : Any = self.prepare_image_inputs()
_A : Any = image_processor(SCREAMING_SNAKE_CASE , return_tensors='np')
_A : Any = processor(images=SCREAMING_SNAKE_CASE , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def A ( self : Optional[int]):
_A : List[str] = self.get_image_processor()
_A : str = self.get_tokenizer()
_A : Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE)
_A : Optional[Any] = 'Alexandra,T-shirt的价格是15便士。'
_A : Any = processor(text=SCREAMING_SNAKE_CASE)
_A : str = tokenizer(SCREAMING_SNAKE_CASE)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def A ( self : List[str]):
_A : int = self.get_image_processor()
_A : List[Any] = self.get_tokenizer()
_A : List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE)
_A : Dict = 'Alexandra,T-shirt的价格是15便士。'
_A : Optional[Any] = self.prepare_image_inputs()
_A : Optional[Any] = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE):
processor()
def A ( self : Any):
_A : Optional[int] = self.get_image_processor()
_A : List[Any] = self.get_tokenizer()
_A : Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE)
_A : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A : Dict = processor.batch_decode(SCREAMING_SNAKE_CASE)
_A : Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : Union[str, Any]):
_A : int = self.get_image_processor()
_A : Optional[int] = self.get_tokenizer()
_A : Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE)
_A : int = 'Alexandra,T-shirt的价格是15便士。'
_A : int = self.prepare_image_inputs()
_A : Tuple = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
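# End-to-end sketch of the processor exercised by these tests. The Hub model id
# below is an assumption (the tests themselves only use local fixtures):
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   batch = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
#   sorted(batch.keys())  # attention_mask, input_ids, pixel_values, token_type_ids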
| 227
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
A : Union[str, Any] = torch.device('''cpu''')
def lowerCAmelCase__ ( ):
_A : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_A : Dict = Image.open(requests.get(lowerCamelCase ,stream=lowerCamelCase ).raw )
return im
def lowerCAmelCase__ ( lowerCamelCase : List[Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ):
_A : Union[str, Any] = dct.pop(lowerCamelCase )
_A : List[str] = val
def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ):
_A : Optional[Any] = []
for k in state_dict.keys():
_A : Optional[int] = k
if ".pwconv" in k:
_A : str = k_new.replace('.pwconv' ,'.point_wise_conv' )
if ".dwconv" in k:
_A : Any = k_new.replace('.dwconv' ,'.depth_wise_conv' )
if ".Proj." in k:
_A : Optional[Any] = k_new.replace('.Proj.' ,'.proj.' )
if "patch_embed" in k_new:
_A : Optional[int] = k_new.replace('patch_embed' ,'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
_A : Tuple = k_new.split('.' )
if ls[2].isdigit():
_A : List[Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
_A : List[str] = k_new.replace('network' ,'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : str ,lowerCamelCase : List[str] ):
_A : Dict = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_A : Any = 1000
_A : int = 'huggingface/label-files'
_A : List[Any] = 'imagenet-1k-id2label.json'
_A : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase ,lowerCamelCase ,repo_type='dataset' ) ,'r' ) )
_A : Dict = {int(lowerCamelCase ): v for k, v in idalabel.items()}
_A : Optional[int] = idalabel
_A : Any = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_A : Optional[Any] = [3, 3, 6, 4]
_A : Optional[int] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
_A : List[Any] = [3, 3, 9, 6]
_A : Tuple = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
_A : int = [4, 3, 10, 5]
_A : int = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
_A : Optional[Any] = [4, 4, 12, 6]
_A : Any = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
_A : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase ,map_location='cpu' ,check_hash=lowerCamelCase )
else:
_A : Union[str, Any] = torch.load(lowerCamelCase ,map_location='cpu' )
_A : Union[str, Any] = checkpoint
_A : List[str] = create_rename_keys(lowerCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# load HuggingFace model
_A : str = SwiftFormerForImageClassification(lowerCamelCase ).eval()
hf_model.load_state_dict(lowerCamelCase )
# prepare test inputs
_A : Any = prepare_img()
_A : Optional[int] = ViTImageProcessor.from_pretrained('preprocessor_config' )
_A : Any = processor(images=lowerCamelCase ,return_tensors='pt' )
# compare outputs from both models
_A : int = get_expected_output(lowerCamelCase )
_A : Optional[int] = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] ,lowerCamelCase ,atol=1E-3 )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
A : List[str] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
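# Example invocation (a sketch; the script name and checkpoint path are
# hypothetical placeholders):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs_checkpoint.pth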
| 227
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'falcon'
SCREAMING_SNAKE_CASE = ['past_key_values']
def __init__(self , _lowerCamelCase=65024 , _lowerCamelCase=4544 , _lowerCamelCase=32 , _lowerCamelCase=71 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.02 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=None , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=11 , _lowerCamelCase=11 , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase__ : Tuple = kwargs.pop("""n_embed""" , _lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = hidden_size if n_embed is None else n_embed
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : Any = layer_norm_epsilon
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : str = use_cache
UpperCAmelCase__ : Optional[Any] = hidden_dropout
UpperCAmelCase__ : Dict = attention_dropout
UpperCAmelCase__ : Tuple = bos_token_id
UpperCAmelCase__ : int = eos_token_id
UpperCAmelCase__ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase__ : Tuple = alibi
UpperCAmelCase__ : Dict = new_decoder_architecture
UpperCAmelCase__ : Dict = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase__ : List[Any] = parallel_attn
UpperCAmelCase__ : Any = bias
super().__init__(bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def _a (self ):
"""simple docstring"""
return not self.alibi
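# Quick arithmetic check of the two properties above, using the constructor
# defaults: the per-head dimension is hidden_size // num_attention_heads ==
# 4544 // 71 == 64, and the second property is True because `alibi` defaults
# to False (i.e. rotary position embeddings are used).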
| 171
|
"""simple docstring"""
import numpy as np
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1E-12 , lowerCAmelCase = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowerCAmelCase )[0] == np.shape(lowerCAmelCase )[1]
# Ensure proper dimensionality.
assert np.shape(lowerCAmelCase )[0] == np.shape(lowerCAmelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCAmelCase ) == np.iscomplexobj(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = np.iscomplexobj(lowerCAmelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCAmelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Optional[int] = 1E12
while not convergence:
# Multiple matrix by the vector.
UpperCAmelCase__ : int = np.dot(lowerCAmelCase , lowerCAmelCase )
# Normalize the resulting output vector.
UpperCAmelCase__ : Optional[Any] = w / np.linalg.norm(lowerCAmelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
UpperCAmelCase__ : List[Any] = vector.conj().T if is_complex else vector.T
UpperCAmelCase__ : Optional[Any] = np.dot(lowerCAmelCase , np.dot(lowerCAmelCase , lowerCAmelCase ) )
# Check convergence.
UpperCAmelCase__ : Union[str, Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = lambda_
if is_complex:
UpperCAmelCase__ : Any = np.real(lambda_ )
return lambda_, vector
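# Sanity-check sketch: the dominant eigenvalue of a diagonal matrix is its
# largest diagonal entry, so starting from a vector with a nonzero component
# along that axis, power iteration on diag(2, 1) should converge to ~2.0:
#
#   value, vector = power_iteration(np.diag([2.0, 1.0]), np.array([1.0, 1.0]))
#   round(value, 6)  # 2.0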
def a__ ( ) -> None:
UpperCAmelCase__ : Tuple = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
UpperCAmelCase__ : int = np.array([41, 4, 20] )
UpperCAmelCase__ : str = real_input_matrix.astype(np.complexaaa )
UpperCAmelCase__ : Any = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
UpperCAmelCase__ : Dict = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
UpperCAmelCase__ : List[str] = real_input_matrix
UpperCAmelCase__ : Any = real_vector
elif problem_type == "complex":
UpperCAmelCase__ : List[Any] = complex_input_matrix
UpperCAmelCase__ : int = complex_vector
# Our implementation.
UpperCAmelCase__ , UpperCAmelCase__ : int = power_iteration(lowerCAmelCase , lowerCAmelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
    # eigh (eigh is used for symmetric or hermitian matrices).
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = np.linalg.eigh(lowerCAmelCase )
# Last eigenvalue is the maximum one.
UpperCAmelCase__ : str = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
UpperCAmelCase__ : List[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element-wise of each eigenvector,
        # as eigenvectors are unique only up to a sign flip.
assert np.linalg.norm(np.abs(lowerCAmelCase ) - np.abs(lowerCAmelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 171
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : int = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCAmelCase_ : str = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCAmelCase_ : List[Any] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ : str = [
        'TFData2VecVisionForImageClassification',
        'TFData2VecVisionForSemanticSegmentation',
        'TFData2VecVisionModel',
        'TFData2VecVisionPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
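# Note on the lazy-module pattern above: every name listed in the import
# structure is importable from the package, but the underlying submodule is
# only loaded on first attribute access, e.g. (a sketch):
#
#   from transformers.models.data2vec import Data2VecTextConfig  # triggers the real import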
| 120
|
import json
import sys
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : List[str] ) -> Tuple:
"""simple docstring"""
with open(__A , encoding='utf-8' ) as f:
a_ : Union[str, Any] = json.load(__A )
a_ : Any = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(__A ):
a_ : List[str] = results[benchmark_name]
a_ : int = benchmark_name.split('/' )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
a_ : Any = '| metric |'
a_ : Optional[Any] = '|--------|'
a_ : int = '| new / old (diff) |'
for metric_name in sorted(__A ):
a_ : List[Any] = benchmark_res[metric_name]
a_ : int = metric_vals['new']
a_ : Union[str, Any] = metric_vals.get('old' , __A )
a_ : Optional[int] = metric_vals.get('diff' , __A )
a_ : str = F""" {new_val:f}""" if isinstance(__A , (int, float) ) else 'None'
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(__A , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(__A , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(__A ) )
if __name__ == "__main__":
UpperCAmelCase_ : int = sys.argv[1]
UpperCAmelCase_ : Any = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
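# Expected input shape for the script above (a sketch; the benchmark and metric
# names are hypothetical):
#
#   {
#     "benchmarks/text_classification.json": {
#       "accuracy": {"new": 0.91, "old": 0.89, "diff": 0.02},
#       "runtime_s": {"new": 12.3, "old": 14.1, "diff": -1.8}
#     }
#   }
#
# "old" and "diff" are optional per metric. Usage:
#   python format_json_to_md.py <input_results.json> <output_report.md>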
| 120
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE :Optional[Any] = '''\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'''
SCREAMING_SNAKE_CASE :Optional[Any] = '''\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'''
SCREAMING_SNAKE_CASE :List[Any] = '''\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
    def lowerCAmelCase__ ( self : int , predictions : Any , references : Any , sample_weight : Any=None ) -> dict:
        """simple docstring"""
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
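# For binary labels the coefficient computed above reduces to the closed form
# (a reference formula, not used by the sklearn call):
#
#   MCC = (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN))
#
# giving +1 for perfect prediction, 0 for a random one, and -1 for total
# disagreement between predictions and references.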
| 159
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class a__ ( unittest.TestCase ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.0_2 , _UpperCamelCase=4 , ):
"""simple docstring"""
_lowercase : int = parent
_lowercase : List[str] = batch_size
_lowercase : Tuple = seq_length
_lowercase : Any = is_training
_lowercase : List[Any] = use_attention_mask
_lowercase : Dict = use_token_type_ids
_lowercase : int = use_labels
_lowercase : List[Any] = vocab_size
_lowercase : int = hidden_size
_lowercase : int = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : str = max_position_embeddings
_lowercase : Optional[int] = type_vocab_size
_lowercase : List[str] = type_sequence_label_size
_lowercase : str = initializer_range
_lowercase : List[Any] = num_choices
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Any = None
if self.use_attention_mask:
_lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : str = None
if self.use_token_type_ids:
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Optional[Any] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = config_and_inputs
_lowercase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = config_and_inputs
_lowercase : Any = True
_lowercase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class a__ ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[Any] = True
_SCREAMING_SNAKE_CASE : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowercase : List[str] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_UpperCamelCase )
_lowercase : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCamelCase )
@require_flax
class a__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_UpperCamelCase )
_lowercase : Optional[Any] = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
_lowercase : Optional[Any] = model(_UpperCamelCase )[0]
_lowercase : Any = [1, 11, 50265]
self.assertEqual(list(output.shape ) , _UpperCamelCase )
# compare the actual values for a slice.
_lowercase : Dict = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_UpperCamelCase )
_lowercase : int = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
_lowercase : List[Any] = model(_UpperCamelCase )[0]
# compare the actual values for a slice.
_lowercase : List[str] = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
| 250
| 0
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase_ ( UpperCamelCase__ : List[str], UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Dict ):
'''simple docstring'''
UpperCamelCase__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
UpperCamelCase__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
UpperCamelCase__ = F"""{src_lang}-{tgt_lang}"""
UpperCamelCase__ = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=UpperCamelCase__, exist_ok=UpperCamelCase__ )
UpperCamelCase__ = os.path.join(UpperCamelCase__, '''README.md''' )
print(F"""Generating {path}""" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
lowercase = Path(__file__).resolve().parent.parent.parent
lowercase = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
lowercase = model_cards_dir / """allenai""" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 35
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 35
| 1
|
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0 = no match, 1 = equal)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transpositions
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix, up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
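    # With the implementation above this prints 0.4666666666666666: one matched
    # character ("l"), no transpositions, and no shared prefix.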
| 6
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum subarray of arr[low:high + 1] via divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    """Time one run of max_subarray on a random input of the given size."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Plot the runtime of max_subarray against the input size."""
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
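    # Illustrative check (classic CLRS example):
    #   max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) -> (3, 6, 6),
    #   i.e. the maximum-sum subarray is [4, -1, 2, 1].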
| 6
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file: str):
    """Loads a vocabulary file into an ordered token -> id dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer:
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        """Greedy longest-match-first WordPiece tokenization of a single word."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """Tokenizer for CPM-Ant: jieba pre-segmentation followed by WordPiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # map the literal space / newline characters onto the dedicated vocab tokens
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]
    @property
    def newline_id(self):
        return self.encoder["\n"]
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation, then WordPiece on each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
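# Minimal usage sketch (assumes a local vocab.txt and the optional jieba backend):
#   tokenizer = CpmAntTokenizer("vocab.txt")
#   ids = tokenizer.encode("今天天气真好")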
| 352
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir relation for whichever of the three arguments is 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148
| 0
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply every (tf_name, hf_name) replacement pattern to a key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map TF checkpoint weights onto a BigBirdPegasus PyTorch model."""
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    """Read every non-ignored variable from a TF checkpoint into a dict of arrays."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
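    # Example invocation (script name and paths are illustrative):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path ./tf_ckpt --save_dir ./hf_model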
| 227
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at grid[row][column]."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
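# Note: the assignment expression (walrus operator) used in ``sudoku`` requires
# Python >= 3.8.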
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 227
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    """Wraps an auto image processor and an auto tokenizer into one processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 310
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 310
| 1
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str):
    # Initialise PyTorch model from the config
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 120
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-wise source data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column; weight 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
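# Worked example: procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
# appends the combined scores 2.0, 1.0 and ~1.33 to the three rows; weight 0 rewards
# low values, weight 1 rewards high values.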
| 120
| 1
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule="scaled_linear", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 367
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 256
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    """Ordinary least squares fit of users against [1, date, match]; predicts the next value."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Seasonal ARIMA forecast of the next user count."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel SVR forecast of the next user count."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
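# A vote counts as "safe" only when the forecast does not exceed the actual value and
# lies within 0.1 of it; the data is flagged safe when safe votes outnumber unsafe ones.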
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data (tst_user holds a single held-out value)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 35
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") if False else in_proj_bias
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
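    # Example invocation (script name and dump path are illustrative):
    #   python convert_deit_timm_to_pytorch.py --deit_name vit_deit_base_distilled_patch16_224 --pytorch_dump_folder_path ./deit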
| 35
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCAmelCase__ : str = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """Configuration for 8-bit / 4-bit model loading with bitsandbytes."""
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''')
        self.post_init()
    def post_init(self):
        """Run basic type checks on the configuration values."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('''llm_int8_threshold must be a float''')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('''bnb_4bit_quant_type must be a string''')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''')) >= version.parse(
            '''0.39.0'''):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''')
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, '''w''', encoding='''utf-8''') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '''\n'''
            writer.write(json_string)
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype''']).split('''.''')[1]
        return output
    def __repr__(self):
        return F"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string(self, use_diff: bool = True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
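# Usage sketch:
#   config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   config.to_json_file("quantization_config.json")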
| 301
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'''do_clean_text''': False, '''add_prefix_space''': False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
        # fmt: on
        emoji_tokens = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}}  # 😀
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''emoji_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        with open(self.emoji_file, '''w''') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
        output_text = '''こんにちは、世界。 \nこんばんは、世界。😀'''
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、世界。 こんばんは、㔺界。'''
        expected_token = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
        expected_text = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        expected_text = '''こんにちは、世界。こんばんは、世界。😀'''
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode('''''', prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        x_rebuilt_1 = tokenizer.decode(x_token_1)
        x_rebuilt_2 = tokenizer.decode(x_token_2)
        x_rebuilt_3 = tokenizer.decode(x_token_3)
        self.assertEqual(x_rebuilt_1, expected_text)
        self.assertEqual(x_rebuilt_2, expected_text)
        self.assertEqual(x_rebuilt_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer('''''', prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
        x_token_1 = tokenizer.encode('''あンいワ''')
        x_token_2 = tokenizer.encode('''''', prefix_text='''あンいワ''')
        x_token_3 = tokenizer.encode('''いワ''', prefix_text='''あン''')
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
        input_pairs = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 301
| 1
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids')
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask')
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCamelCase__ ( lowerCamelCase_ ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
preprocess_params , postprocess_params = {}, {}
if padding is not None:
preprocess_params['padding'] = padding
if truncation is not None:
preprocess_params['truncation'] = truncation
if top_k is not None:
postprocess_params['top_k'] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : Tuple = {"image": image, "question": question}
else:
snake_case : List[str] = image
snake_case : Optional[int] = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return results
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
image = load_image(inputs["image"] )
model_inputs = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
image_features = self.image_processor(images=image , return_tensors=self.framework )
model_inputs.update(image_features )
return model_inputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
model_outputs = self.model(**model_inputs )
return model_outputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.sigmoid()[0]
scores , ids = probs.topk(top_k )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
from __future__ import annotations
def lowerCamelCase_ ( _a : list[int] , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : list[list[int]] = []
UpperCAmelCase_ : list[int] = []
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Optional[int] = sum(_a )
create_state_space_tree(_a , _a , _a , _a , _a , _a )
return result
def lowerCamelCase_ ( _a : list[int] , _a : int , _a : int , _a : list[int] , _a : list[list[int]] , _a : int , ):
'''simple docstring'''
if sum(_a ) > max_sum or (remaining_nums_sum + sum(_a )) < max_sum:
return
if sum(_a ) == max_sum:
result.append(_a )
return
for index in range(_a , len(_a ) ):
create_state_space_tree(
_a , _a , index + 1 , [*path, nums[index]] , _a , remaining_nums_sum - nums[index] , )
UpperCamelCase_ = [3, 34, 4, 12, 5, 2]
UpperCamelCase_ = 9
UpperCamelCase_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
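# Independent cross-check for the backtracking solver above: a brute force over
# all combinations must find the same subsets. For nums = [3, 34, 4, 12, 5, 2]
# and max_sum = 9 the only solutions are [3, 4, 2] and [4, 5].
from itertools import combinations

def brute_force_subsets(nums, target):
    hits = []
    for r in range(1, len(nums) + 1):
        for combo in combinations(nums, r):
            if sum(combo) == target:
                hits.append(sorted(combo))
    return sorted(hits)

assert brute_force_subsets([3, 34, 4, 12, 5, 2], 9) == [[2, 3, 4], [4, 5]]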
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
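# A simplified sketch of the lazy-import mechanism used above: attributes are
# resolved on first access instead of at import time, so the heavy torch
# backend is only imported when a model class is actually requested. This
# mirrors the idea behind transformers' _LazyModule; the code below is an
# illustrative assumption, not the library's exact implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, i.e. on first access.
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so the import runs only once
        return value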
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCamelCase (_a ):
def __init__( self: str,A_: VQModel,A_: UNetaDModel,A_: DDIMScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=A_,unet=A_,scheduler=A_ )
@torch.no_grad()
def __call__( self: str,A_: int = 1,A_: Optional[Union[torch.Generator, List[torch.Generator]]] = None,A_: float = 0.0,A_: int = 50,A_: Optional[str] = "pil",A_: bool = True,**A_: int,):
'''simple docstring'''
__UpperCamelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),generator=A_,)
__UpperCamelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__UpperCamelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
__UpperCamelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__UpperCamelCase = {}
if accepts_eta:
__UpperCamelCase = eta
for t in self.progress_bar(self.scheduler.timesteps ):
__UpperCamelCase = self.scheduler.scale_model_input(A_,A_ )
# predict the noise residual
__UpperCamelCase = self.unet(A_,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__UpperCamelCase = self.scheduler.step(A_,A_,A_,**A_ ).prev_sample
# decode the image latents with the VAE
__UpperCamelCase = self.vqvae.decode(A_ ).sample
__UpperCamelCase = (image / 2 + 0.5).clamp(0,1 )
__UpperCamelCase = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
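# Hedged usage sketch for an unconditional latent diffusion pipeline with the
# same VQModel + UNet + DDIMScheduler layout as the class above.
# "CompVis/ldm-celebahq-256" is a public checkpoint assumed here for
# illustration; any compatible checkpoint would do.
import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.Generator(device="cpu").manual_seed(0)
# eta only has an effect because DDIMScheduler.step accepts it, which is
# exactly what the inspect.signature check above guards.
image = pipe(num_inference_steps=50, eta=0.0, generator=generator).images[0]
image.save("ldm_sample.png")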
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
tokenizer.padding_side = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
repo = git.Repo(search_parent_directories=True )
repo_infos = {
'repo_id': str(repo ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
prediction_tokens = normalize_answer(_lowercase ).split()
ground_truth_tokens = normalize_answer(_lowercase ).split()
common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
num_same = sum(common.values() )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens )
recall = 1.0 * num_same / len(ground_truth_tokens )
f1 = (2 * precision * recall) / (precision + recall)
return f1
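# Worked arithmetic for the token-overlap F1 above: with prediction
# "the cat sat" and gold answer "a cat sat down", normalization drops the
# articles, the common token bag is {"cat", "sat"} (num_same = 2), so
# precision = 2/2 = 1.0, recall = 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.
assert abs((2 * 1.0 * (2 / 3)) / (1.0 + 2 / 3) - 0.8) < 1e-9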
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
equivalent_param['dropout'] = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
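# Minimal sketch of the remapping performed just above, with stand-in classes
# defined purely for this example: a value set under a generic hyper-parameter
# name ("dropout") is moved onto whichever attribute the concrete config
# actually exposes ("dropout_rate" for T5-style configs).
class _Hparams:
    dropout = 0.1

class _T5StyleConfig:
    dropout_rate = 0.0

hparams, config = _Hparams(), _T5StyleConfig()
equivalent_param = {"dropout": "dropout_rate"}
for p in ["dropout"]:
    target = p if hasattr(config, p) else equivalent_param[p]
    setattr(config, target, getattr(hparams, p))
assert config.dropout_rate == 0.1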
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : Union[str, Any] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase : str = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
_lowerCAmelCase : int = "▁"
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = BigBirdTokenizer
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE = []
def __init__( self , __snake_case=None , __snake_case=None , __snake_case="<unk>" , __snake_case="<s>" , __snake_case="</s>" , __snake_case="<pad>" , __snake_case="[SEP]" , __snake_case="[MASK]" , __snake_case="[CLS]" , **__snake_case , ) -> Tuple:
'''simple docstring'''
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else bos_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else cls_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
__a =vocab_file
__a =False if not self.vocab_file else True
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
__a =[self.sep_token_id]
__a =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
__a =[self.sep_token_id]
__a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__a =os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
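# Worked check of the special-tokens mask logic above (the [1] + [0]*len + [1]
# pattern): a single sequence of three content tokens becomes
# [CLS] x x x [SEP], so positions 0 and 4 are flagged. Token ids are irrelevant
# to the mask shape, so plain integers stand in here.
def special_tokens_mask(token_ids_a, token_ids_b=None):
    mask = [1] + [0] * len(token_ids_a) + [1]
    if token_ids_b is not None:
        mask += [0] * len(token_ids_b) + [1]
    return mask

assert special_tokens_mask([7, 8, 9]) == [1, 0, 0, 0, 1]
assert special_tokens_mask([7, 8], [9]) == [1, 0, 0, 1, 0, 1]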
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
cls.pad_token_id = 1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
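# Hedged sketch of the legacy_behaviour toggle exercised in the last test
# above: legacy mode appends [eos, src_lang_code] after the text, while the
# non-legacy mode prefixes the language code and ends with eos, matching the
# two expected id lists. Assumes the same public checkpoint as the tests.
from transformers import NllbTokenizer

tok = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", legacy_behaviour=False
)
ids = tok("UN Chief says there is no military solution in Syria").input_ids
assert ids[0] == tok.convert_tokens_to_ids("eng_Latn")  # language code first
assert ids[-1] == tok.eos_token_id  # eos (id 2) last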
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( _lowercase , unittest.TestCase):
UpperCAmelCase__ : int = UnCLIPImageVariationPipeline
UpperCAmelCase__ : Union[str, Any] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
UpperCAmelCase__ : Union[str, Any] = IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : Optional[Any] = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
UpperCAmelCase__ : Optional[Any] = False
@property
def lowercase_ ( self :str ) -> List[Any]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Any ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self :Tuple ) -> List[Any]:
'''simple docstring'''
return 100
@property
def lowercase_ ( self :int ) -> Tuple:
'''simple docstring'''
__A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self :Tuple ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCamelCase )
@property
def lowercase_ ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__UpperCamelCase )
@property
def lowercase_ ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__A = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
__A = UnCLIPTextProjModel(**__UpperCamelCase )
return model
@property
def lowercase_ ( self :Any ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__A = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
__A = UNetaDConditionModel(**__UpperCamelCase )
return model
@property
def lowercase_ ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowercase_ ( self :Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__A = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowercase_ ( self :Tuple ) -> str:
'''simple docstring'''
torch.manual_seed(1 )
__A = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def lowercase_ ( self :List[Any] ) -> str:
'''simple docstring'''
__A = self.dummy_decoder
__A = self.dummy_text_proj
__A = self.dummy_text_encoder
__A = self.dummy_tokenizer
__A = self.dummy_super_res_first
__A = self.dummy_super_res_last
__A = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
__A = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
__A = CLIPImageProcessor(crop_size=32 , size=32 )
__A = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowercase_ ( self :int , _A :Tuple , _A :Tuple=0 , _A :List[str]=True ) -> str:
'''simple docstring'''
__A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith('mps' ):
__A = torch.manual_seed(__UpperCamelCase )
else:
__A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
if pil_image:
__A = input_image * 0.5 + 0.5
__A = input_image.clamp(0 , 1 )
__A = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__A = DiffusionPipeline.numpy_to_pil(__UpperCamelCase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowercase_ ( self :Any ) -> Any:
'''simple docstring'''
__A = 'cpu'
__A = self.get_dummy_components()
__A = self.pipeline_class(**__UpperCamelCase )
__A = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
__A = pipe(**__UpperCamelCase )
__A = output.images
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
__A = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
__A = image[0, -3:, -3:, -1]
__A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__A = 'cpu'
__A = self.get_dummy_components()
__A = self.pipeline_class(**__UpperCamelCase )
__A = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
__A = pipe(**__UpperCamelCase )
__A = output.images
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
__A = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
__A = image[0, -3:, -3:, -1]
__A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
__A = 'cpu'
__A = self.get_dummy_components()
__A = self.pipeline_class(**__UpperCamelCase )
__A = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
__A = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
__A = pipe(**__UpperCamelCase )
__A = output.images
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
__A = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
__A = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
__A = image[0, -3:, -3:, -1]
__A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__A = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self :str ) -> str:
'''simple docstring'''
__A = torch.device('cpu' )
class DummyScheduler:
    init_noise_sigma = 1
__A = self.get_dummy_components()
__A = self.pipeline_class(**__UpperCamelCase )
__A = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__A = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
__A = pipe.decoder.dtype
__A = 1
__A = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__A = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
__A = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__A = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
img_out_1 = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase ).images
__A = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
# Don't pass image, instead pass embedding
__A = pipeline_inputs.pop('image' )
__A = pipe.image_encoder(__UpperCamelCase ).image_embeds
img_out_2 = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase , image_embeddings=__UpperCamelCase , ).images
# make sure passing image embeddings manually gives an identical result
assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
@skip_mps
def lowercase_ ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
__A = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__A = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCamelCase , expected_max_diff=__UpperCamelCase )
@skip_mps
def lowercase_ ( self :Any ) -> int:
'''simple docstring'''
__A = torch_device == 'cpu'
__A = True
__A = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
def lowercase_ ( self :Any ) -> Optional[Any]:
'''simple docstring'''
__A = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__A = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__UpperCamelCase )
@skip_mps
def lowercase_ ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase_ ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def lowercase_ ( self :Any ) -> int:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :Any ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :str ) -> Union[str, Any]:
'''simple docstring'''
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
__A = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
__A = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = pipeline(
__UpperCamelCase , generator=__UpperCamelCase , output_type='np' , )
__A = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase , 15 )
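# Hedged standalone version of the slow test above: load the public image
# variation checkpoint, feed the reference cat image, and check the 256x256
# numpy output. Requires a CUDA GPU for the fp16 weights, as in the test.
import torch
from diffusers import UnCLIPImageVariationPipeline
from diffusers.utils import load_image

input_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
)
pipe = UnCLIPImageVariationPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
).to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(input_image, generator=generator, output_type="np")
assert out.images[0].shape == (256, 256, 3)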
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( _lowercase):
def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=13 , __UpperCamelCase : str=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : str=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple="None" , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=None , ) -> Tuple:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
config = self.get_config()
config.vocab_size = 300
return config
def _UpperCamelCase ( self : int , __UpperCamelCase : List[Any] ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] ) -> List[str]:
_UpperCamelCase = DebertaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] ) -> Tuple:
_UpperCamelCase = DebertaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> List[Any]:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Dict:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) -> List[Any]:
_UpperCamelCase = DebertaForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
_UpperCamelCase = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
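# ---------------------------------------------------------------------------
# Hedged usage sketch (added by the editor; not part of the original test
# file): running the slow integration check above by hand. Assumes
# `transformers` and `torch` are installed and that the `microsoft/deberta-base`
# checkpoint is reachable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
    deberta = DebertaModel.from_pretrained("microsoft/deberta-base")
    encoded = tokenizer("Hello world", return_tensors="pt")
    with torch.no_grad():
        hidden = deberta(**encoded)[0]
    print(hidden.shape)  # (1, sequence_length, hidden_size)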
| 256
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        # Note: the Flax classes live in the Flax modeling file, not the TF one.
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
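# ---------------------------------------------------------------------------
# Hedged sketch (added by the editor; not part of the original file): a
# minimal re-implementation of the lazy-import idea used above, so the control
# flow is easier to follow. The class below is illustrative only, not the real
# `_LazyModule` API.
# ---------------------------------------------------------------------------
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Defer submodule imports until an exported attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        try:
            submodule = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}") from None
        module = importlib.import_module(f".{submodule}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the next lookup skips __getattr__
        return value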
| 355
|
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the simultaneous equations matrix * x = vector with Gaussian
    elimination and partial pivoting, returning the column vector x.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Given data points (1, y_list[0]), (2, y_list[1]), ..., return the
    polynomial of minimal degree that passes through all of them.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms (FITs) of the optimum polynomial
    approximations built from the first `order` data points of `func`.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
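    # Hedged sanity check (added by the editor; not part of the original
    # solution): the cube sequence example from the Project Euler 101
    # statement, whose first incorrect terms are 1, 15 and 58.
    assert solution(lambda n: n**3, 3) == 1 + 15 + 58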
| 145
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
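    # Hedged illustration (added by the editor; not part of the original
    # tester): why the hidden state is 3 * seq_length long. The model
    # interleaves one return, one state and one action embedding per timestep.
    @staticmethod
    def _interleaving_example():
        seq_length, hidden_size = 2, 4
        returns = torch.zeros(1, seq_length, hidden_size)
        states = torch.ones(1, seq_length, hidden_size)
        actions = 2 * torch.ones(1, seq_length, hidden_size)
        # (batch, 3, seq, hidden) -> (batch, seq, 3, hidden) -> (batch, 3 * seq, hidden)
        stacked = torch.stack((returns, states, actions), dim=1)
        interleaved = stacked.permute(0, 2, 1, 3).reshape(1, 3 * seq_length, hidden_size)
        assert interleaved.shape == (1, 3 * seq_length, hidden_size)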
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
__lowerCAmelCase = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of states, actions and returns
        from a sequence of state, action and return observations. Test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 301
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
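# ---------------------------------------------------------------------------
# Hedged usage sketch (added by the editor; not part of the original test
# module): the inspection helpers exercised above, run directly. Assumes the
# `datasets` library is installed and the Hub is reachable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(get_dataset_config_names("squad"))  # e.g. ['plain_text']
    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']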
| 301
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
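# ---------------------------------------------------------------------------
# Hedged usage sketch (added by the editor; not part of the original test
# module): the `top_k` behaviour exercised above, outside the test harness.
# Assumes the tiny checkpoint is reachable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
    print(clf("This is great !"))              # best label only
    print(clf("This is great !", top_k=None))  # every label with its score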
| 360
|
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for _ in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""PerceiverFeatureExtractor"""]
__lowerCamelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
snake_case : Dict = self.scheduler_classes[0]
snake_case : Tuple = self.get_scheduler_config()
snake_case : Dict = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = self.scheduler_classes[0]
snake_case : List[Any] = self.get_scheduler_config()
snake_case : int = scheduler_class(**snake_case__ )
snake_case , snake_case : Any = 10, 0.0
scheduler.set_timesteps(snake_case__ )
snake_case : Optional[Any] = self.dummy_model()
snake_case : str = self.dummy_sample_deter
snake_case : Dict = self.dummy_sample_deter + 0.1
snake_case : Dict = self.dummy_sample_deter - 0.1
snake_case : Optional[Any] = samplea.shape[0]
snake_case : str = torch.stack([samplea, samplea, samplea] , dim=0 )
snake_case : Tuple = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ )
snake_case : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
snake_case : List[str] = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case__ )
snake_case : Dict = torch.sum(torch.abs(snake_case__ ) )
snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = self.full_loop()
snake_case : Optional[Any] = torch.sum(torch.abs(snake_case__ ) )
snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.223967 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.full_loop(prediction_type="v_prediction" )
snake_case : int = torch.sum(torch.abs(snake_case__ ) )
snake_case : Optional[int] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
snake_case : str = torch.sum(torch.abs(snake_case__ ) )
snake_case : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
snake_case : Tuple = torch.sum(torch.abs(snake_case__ ) )
snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
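# ---------------------------------------------------------------------------
# Hedged usage sketch (added by the editor; not part of the original test
# file): the `steps_offset=1` timestep layout asserted above, outside the test
# harness. Assumes `diffusers` is installed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    parallel_scheduler = DDIMParallelScheduler(steps_offset=1)
    parallel_scheduler.set_timesteps(5)
    print(parallel_scheduler.timesteps)  # tensor([801, 601, 401, 201,   1])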
| 59
| 1
|
'''simple docstring'''
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
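# ---------------------------------------------------------------------------
# Hedged usage note (added by the editor; not part of the original script):
# these collective ops only exercise multiple ranks under a distributed
# launcher, e.g. (script name illustrative):
#
#   accelerate launch --num_processes 2 test_operations.py
#
# On a single process every check degenerates to the trivial one-rank case.
# ---------------------------------------------------------------------------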
| 357
|
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowerCAmelCase = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """
    Get the list of modules that are relatively imported in a module file.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """
    Get the list of all files that are needed for a given module.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """
    Import a module on the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class
    inheriting from `DiffusionPipeline` in the module.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """
    Download and cache `module_file` from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """
    Extracts a class from a module file, present in the local folder or repository of a model.
    """
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )

    return get_class_in_module(class_name, final_module.replace(".py", ""))
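# ---------------------------------------------------------------------------
# Hedged illustration (added by the editor; not part of the original module):
# what the relative-import regexes above actually match, on a toy source
# string.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _toy_source = "import .utils\nfrom .pipeline_a import FooPipeline\nimport torch\n"
    _rel = re.findall(r"^\s*import\s+\.(\S+)\s*$", _toy_source, flags=re.MULTILINE)
    _rel += re.findall(r"^\s*from\s+\.(\S+)\s+import", _toy_source, flags=re.MULTILINE)
    assert _rel == ["utils", "pipeline_a"]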
| 98
| 0
|
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    r"""
    Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
    processor.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to the feature extractor's __call__ and the `text` argument to the tokenizer's
        __call__.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
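# ---------------------------------------------------------------------------
# Hedged usage sketch (added by the editor; not part of the original file):
# pairing the processor with a pretrained checkpoint. Assumes `transformers`
# is installed and the openai/whisper-tiny checkpoint is reachable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    features = processor(audio=audio, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000)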
| 308
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Whisper feature extractor that computes log-mel filter bank features from raw speech.
    """

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features( self , waveform: np.array ) -> np.ndarray:
        # Compute the log-mel spectrogram of the provided audio, clipped and scaled.
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values: List[np.ndarray] , attention_mask: List[np.ndarray] , padding_value: float = 0.0 ) -> List[np.ndarray]:
        # Every array in the list is normalized to have zero mean and unit variance.
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: bool = True , pad_to_multiple_of: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , padding: Optional[str] = 'max_length' , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , do_normalize: Optional[bool] = None , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({'input_features': raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , List ):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['feature_extractor_type'] = self.__class__.__name__
        if 'mel_filters' in output:
            del output['mel_filters']
        return output
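# Hedged usage sketch (not part of the original file): with the defaults above,
# a full 30 s window (n_samples samples) yields an (80, 3000) log-mel matrix,
# since 80 = feature_size and 3000 = n_samples // hop_length.
if __name__ == '__main__':
    _fe = WhisperFeatureExtractor()
    _feats = _fe(np.zeros(_fe.n_samples), sampling_rate=16_000, return_tensors='np')
    print(_feats['input_features'].shape)  # expected: (1, 80, 3000)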
| 308
| 1
|
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
    def _compute(self, X, reference_distribution):
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        covariance = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(covariance)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(covariance)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
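# Hedged worked example (added for clarity, not in the original file): with
# reference_distribution=[[0, 1], [1, 0]], the scalar mean is 0.5, so
# X=[[0, 1]] gives X_minus_mu=[[-0.5, 0.5]]. np.cov of the transposed
# reference is [[0.5, -0.5], [-0.5, 0.5]], which is singular, so the
# np.linalg.pinv branch is taken (the pseudo-inverse equals the matrix itself
# here), and the distance is (-0.5)(-0.5) + (0.5)(0.5) = 0.5 -- matching the
# {'mahalanobis': array([0.5])} shown in the docstring example above.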
| 232
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__( self , claim_vector: list[int] , allocated_resources_table: list[list[int]] , maximum_claim_table: list[list[int]] , ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
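# Hedged usage sketch (not part of the original file): running the safety
# check on the module-level test tables above; 'describe' is an arbitrary
# truthy keyword that makes main() pretty-print the tables first.
#     BankersAlgorithm(
#         claim_vector=test_claim_vector,
#         allocated_resources_table=test_allocated_res_table,
#         maximum_claim_table=test_maximum_claim_table,
#     ).main(describe=True)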
| 232
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if 'xprophetnet' in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
    for key in loading_info['missing_keys']:
        attributes = key.split('.')
        if attributes[0] == 'lm_head':
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , attribute ) and len(attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == 'weight':
                assert old_model.weight.shape == model.weight.shape, 'Shapes have to match!'
                model.weight = old_model.weight
                logger.info(f'{attribute} is initialized.')
                is_key_init = True
                break
            elif attribute == 'bias':
                assert old_model.bias.shape == model.bias.shape, 'Shapes have to match!'
                model.bias = old_model.bias
                logger.info(f'{attribute} is initialized')
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, 'Shapes have to match'
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, 'Shapes have to match'
                if attribute == 'query_proj':
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == 'key_proj':
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == 'value_proj':
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == '':
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
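# Hedged usage sketch (the script file name and checkpoint paths below are
# placeholders, not real artifacts):
#     python convert_prophetnet_checkpoint.py \
#         --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#         --pytorch_dump_folder_path ./prophetnet_converted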
| 12
|
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
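# Hedged extra example (not in the original file): the searcher works for any
# start/goal pair inside the same module-level grid, e.g. a run to the centre:
#     mid_path = GreedyBestFirst((0, 0), (3, 3)).search()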
| 145
| 0
|
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '/today' ).json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
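# Hedged usage sketch: each endpoint returns a list with one quote dict; the
# 'q' (quote text) and 'a' (author) keys are assumptions about the API shape.
#     today = quote_of_the_day()[0]
#     print(f"{today['q']} -- {today['a']}")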
| 235
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: 'DiagonalGaussianDistribution'


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ('DownEncoderBlock2D',) , up_block_types = ('UpDecoderBlock2D',) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = 'silu' , latent_channels = 4 , norm_num_groups = 32 , sample_size = 32 , scaling_factor = 0.18215 , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value

    def enable_tiling( self , use_tiling: bool = True ):
        self.use_tiling = use_tiling

    def disable_tiling( self ):
        self.enable_tiling(False )

    def enable_slicing( self ):
        self.use_slicing = True

    def disable_slicing( self ):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name , module , processors ):
            if hasattr(module , 'set_processor' ):
                processors[f'{name}.processor'] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'{name}.{sub_name}' , child , processors )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor ):
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f'A dict of processors was passed, but the number of processors {len(processor )} does not match the'
                f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )

        def fn_recursive_attn_processor(name , module , processor ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f'{name}.processor' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'{name}.{sub_name}' , child , processor )

        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )

    def set_default_attn_processor( self ):
        self.set_attn_processor(AttnProcessor() )

    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )

    @apply_forward_hook
    def decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )

    def blend_v( self , a , b , blend_extent ):
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h( self , a , b , blend_extent ):
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def tiled_decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , sample_posterior: bool = False , return_dict: bool = True , generator = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
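# Hedged usage sketch (not part of the original module; sizes are assumed):
#     vae = AutoencoderKL(sample_size=64)
#     image = torch.randn(1, 3, 64, 64)
#     recon = vae(image, sample_posterior=True).sample   # -> (1, 3, 64, 64)
#     vae.enable_tiling()  # decode large inputs tile-by-tile to save memory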
| 235
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print('**************')
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print('right rotation node:', node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print('left rotation node:', node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data')
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root
class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print('insert:' + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print('delete:' + str(data))
        if self.root is None:
            print('Tree is empty!')
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversale, gives a more intuitive look on the tree
        output = ''
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += '*'
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += '\n*************************************'
                        return output
                    output += '\n'
                    break
        output += '\n*************************************'
        return output
def _test() -> None:
    import doctest
    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
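# Hedged extra example (not in the original demo above): an AVL tree keeps its
# height logarithmic, so ten sequential inserts leave get_height() <= 4.
#     tree = AVLtree()
#     for value in range(10):
#         tree.insert(value)
#     assert tree.get_height() <= 4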
| 6
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
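# Hedged usage sketch (not part of the original file; the checkpoint is an
# assumed example): the auto class inspects the checkpoint's config type and
# instantiates the matching Flax architecture from the mappings above.
#     model = FlaxAutoModel.from_pretrained('bert-base-uncased')
#     outputs = model(**tokenizer('hello', return_tensors='np'))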
| 32
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
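# Hedged usage sketch: because of the _LazyModule registration above, the heavy
# torch-dependent code is only imported on first attribute access, e.g.:
#     from transformers.models.timm_backbone import TimmBackboneConfig
#     config = TimmBackboneConfig(backbone='resnet18')  # 'resnet18' is an assumed timm id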
| 371
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self) -> int:
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert a single SquadFeatures instance into the tensors a model expects.
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
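
# Minimal usage sketch (added for illustration; not part of the original file): because
# __getitem__ already returns ready-made tensors, the dataset can feed a vanilla
# DataLoader directly:
#
#     dataset = SquadDataset(args, tokenizer, mode="train")
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8)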
| 148
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
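
# Note: instantiating DonutFeatureExtractor still works but emits the FutureWarning
# above on every construction; new code should build a DonutImageProcessor directly.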
| 268
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation: an even distribution is only possible when the tree holds
    # exactly one coin per node overall.
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation: post-order traversal. For each node, `excess` is the number
    # of coins it can pass up to its parent (possibly negative), and every coin
    # crossing an edge costs one move.
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
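
# Worked example (illustrative, not from the original file): a root holding all three
# coins above two empty leaves needs exactly one move per edge:
#
#     distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))  # -> 2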
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98
| 0
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # NOTE: the class and test-method names were stripped in the source; the names used
    # below are reconstructed from what each test exercises and are assumptions.
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_prompt_embeds(self):
        # Passing precomputed prompt embeddings must match passing the raw prompt.
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        text_inputs = pipe.tokenizer(
            prompt,
            padding='max_length',
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors='np',
        )
        text_inputs = text_inputs['input_ids']

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs['prompt_embeds'] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding='max_length',
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors='np',
            )
            text_inputs = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            'CUDAExecutionProvider',
            {
                'gpu_mem_limit': '15000000000',  # 15GB
                'arena_extend_strategy': 'kSameAsRequested',
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the default PNDM scheduler
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx'
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx'
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'Andromeda galaxy in a bottle'

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
| 118
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE: the names of the three boolean flags below were stripped in the source;
    # they are restored here on the assumption they match the upstream test file.
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 118
| 1
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate the real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate the reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
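
# Worked example (illustrative values, not from the original file): for an apparent
# power of 100 VA at power factor 0.9,
#     real_power(100, 0.9)      -> 90.0 W
#     reactive_power(100, 0.9)  -> 100 * sqrt(1 - 0.81) ~= 43.59 var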
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232
|
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level."""
    # Standard contrast-correction factor: maps level in [-255, 255] to a
    # multiplier applied around the mid-gray value 128.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
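
# Example mapping, computed from the formula above: level=170 gives
# factor = (259 * 425) / (255 * 89) ~= 4.85, i.e. a strong contrast boost.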
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 232
| 1
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    # Cheap scalar fingerprint of the model weights, used to detect whether a
    # checkpoint load actually restored them.
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        """Tests that setting ACCELERATE_TORCH_DEVICE overrides the default device."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with an 8-bit (bitsandbytes) model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that preparing an 8-bit model partially offloaded to CPU raises."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        # NOTE: the offloaded key was stripped in the source; "lm_head" is assumed.
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that preparing an 8-bit model split across GPUs raises in distributed mode."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Tests that an 8-bit model spread over several devices can be prepared outside distributed mode."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
| 201
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # Classic two-pointer scan; assumes `nums` is sorted in ascending order.
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
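
# Runs in O(n) time and O(1) extra space, but only on sorted input; an unsorted
# list would need the hash-map variant of two-sum instead.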
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 201
| 1
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution over a batch of logits ``x``."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
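
# Why this equals the softmax entropy (derivation added for clarity): with
# p_i = exp(x_i) / A, we get H = -sum(p_i * log p_i) = -sum(p_i * (x_i - log A))
#                                = log A - sum(x_i * exp(x_i)) / A = log(A) - B / A.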
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                # Early exit: stop as soon as a layer's prediction is confident enough.
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 235
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
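
# Usage sketch (added for illustration): the defaults above appear to match the
# roformer_chinese_base-sized checkpoints, so `RoFormerConfig()` works out of the box,
# and individual sizes can be overridden, e.g.
# `RoFormerConfig(vocab_size=12000, max_position_embeddings=512)`.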
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 235
| 1
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_transform_and_reverse(self):
        model_id = '''hf-internal-testing/tiny-random-t5'''
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer('''This is me''', return_tensors='''pt''')

        model = model.to_bettertransformer()
        self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = '''hf-internal-testing/tiny-random-t5'''
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 364
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_euler''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='''np''')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_euler''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='''np''')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_dpmpp_2m''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type='''np''',
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 39
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through it."""

    def __init__(self) -> None:
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
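
    # The helper above replays the Trainer loop (epochs, steps, and the log/save/eval
    # triggers) to predict the exact sequence of callback events a training run emits.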
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
def __lowerCAmelCase ( self ) -> Optional[Any]:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__A )
lowerCAmelCase_ :Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowerCAmelCase_ :Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__A , self.get_expected_events(__A ) )
# Independent log/save/eval
lowerCAmelCase_ :Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowerCAmelCase_ :Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__A , self.get_expected_events(__A ) )
lowerCAmelCase_ :Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowerCAmelCase_ :Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__A , self.get_expected_events(__A ) )
lowerCAmelCase_ :Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
lowerCAmelCase_ :List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__A , self.get_expected_events(__A ) )
lowerCAmelCase_ :Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
lowerCAmelCase_ :Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__A , self.get_expected_events(__A ) )
# A bit of everything
lowerCAmelCase_ :Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowerCAmelCase_ :Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__A , self.get_expected_events(__A ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
lowerCAmelCase_ :Optional[int] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__A ) in warn_mock.call_args[0][0]
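
# A minimal sketch (an addition, not part of the original test file) of the
# callback API these tests exercise: a `TrainerCallback` subclass receives
# named events with the signature (args, state, control, **kwargs).
from transformers import TrainerCallback


class EventPrinterCallback(TrainerCallback):
    """Hypothetical example callback that logs epoch boundaries."""

    def on_epoch_begin(self, args, state, control, **kwargs):
        print(f"starting epoch {state.epoch}")

    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")


# It can be registered at init time (`Trainer(..., callbacks=[EventPrinterCallback])`)
# or later via `trainer.add_callback(EventPrinterCallback())`.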
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A = spec.loader.load_module()
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def UpperCamelCase__ ( ):
snake_case : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case : Tuple = False
# source code of `config_class`
snake_case : Tuple = inspect.getsource(lowercase__ )
snake_case : Optional[int] = _re_checkpoint.findall(lowercase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case , snake_case : str = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case : Optional[int] = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case : Any = True
break
snake_case : Optional[Any] = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowercase__ )
if len(lowercase__ ) > 0:
snake_case : Optional[Any] = "\n".join(sorted(lowercase__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
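
# A quick illustration (an addition) of what `_re_checkpoint` extracts from a
# hypothetical docstring fragment:
_example_doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
assert _re_checkpoint.findall(_example_doc) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]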
"""simple docstring"""
def solution(n: int = 1_00) -> int:
    """simple docstring"""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
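
# A brute-force cross-check of the closed form above (an addition);
# 25164150 is the well-known result for n = 100.
def _brute_force_solution(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert solution(100) == _brute_force_solution(100) == 25_164_150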
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
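
# A quick usage sketch (an addition): multiplying A(x) = 1 + 2x + 3x^2 by
# B(x) = 3 + 4x should give 3 + 10x + 17x^2 + 12x^3.
_demo = FFT(poly_a=[1, 2, 3], poly_b=[3, 4])
assert [round(c.real) for c in _demo.product] == [3, 10, 17, 12]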
def merge_sort(collection):
    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """simple docstring"""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
snake_case : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCAmelCase_ ( _snake_case : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(_snake_case , _snake_case ):
__magic_name__ : Any = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_snake_case )
__magic_name__ : List[str] = "".join(bin(_snake_case )[2:].zfill(8 ) for byte in data )
__magic_name__ : Any = len(_snake_case ) % 6 != 0
if padding_needed:
# The padding that will be added later
__magic_name__ : str = B"=" * ((6 - len(_snake_case ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_snake_case ) % 6)
else:
__magic_name__ : Any = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_snake_case ) , 6 ) ).encode()
+ padding
)
def lowerCAmelCase_ ( _snake_case : str ) -> bytes:
'''simple docstring'''
if not isinstance(_snake_case , _snake_case ) and not isinstance(_snake_case , _snake_case ):
__magic_name__ : str = (
"argument should be a bytes-like object or ASCII string, "
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_snake_case )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_snake_case , _snake_case ):
try:
__magic_name__ : List[str] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
__magic_name__ : Dict = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_snake_case ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__magic_name__ : Tuple = encoded_data[:-padding]
__magic_name__ : Dict = "".join(
bin(B64_CHARSET.index(_snake_case ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__magic_name__ : int = "".join(
bin(B64_CHARSET.index(_snake_case ) )[2:].zfill(6 ) for char in encoded_data )
__magic_name__ : int = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_snake_case ) , 8 )
]
return bytes(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
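
# Round-trip sanity check (an addition): compare against the standard
# library's `base64` module on a sample input.
import base64 as _stdlib_base64

_sample = b"hello, world!"
assert base64_encode(_sample) == _stdlib_base64.b64encode(_sample)
assert base64_decode(base64_encode(_sample)) == _sample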
def is_automorphic_number(number: int) -> bool:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
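
# Example values (an addition): 5**2 = 25 and 76**2 = 5776 both end in the
# original number, so they are automorphic; 7**2 = 49 is not.
assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert not is_automorphic_number(7)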
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
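
# A hedged usage sketch (an addition); downloading the pretrained files needs
# network access, so it is left commented out:
# tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
# batch = tokenizer(["Hello world"], return_tensors="pt")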
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
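
# Note (an addition): thanks to `_LazyModule`, importing this package stays
# cheap; heavy submodules such as `modeling_convbert` are only loaded when one
# of the names declared in `_import_structure` is first accessed, e.g.:
# from transformers.models.convbert import ConvBertModel  # triggers the torch path lazily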
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """simple docstring"""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    """simple docstring"""
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """simple docstring"""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """simple docstring"""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """simple docstring"""
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=3_50):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=1_28,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    """simple docstring"""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
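
# A small sanity check of the mask helper (an addition): with a 64x64 target
# and an 8-pixel overlap, the linear-ramp padding restores the full 64x64 size.
_demo_mask = make_transparency_mask((64, 64), 8)
assert _demo_mask.shape == (64, 64)
assert _demo_mask[0, 0] == 0 and _demo_mask[32, 32] == 255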
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
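
# A hedged usage sketch (an addition): the encoder/decoder pair tested above is
# typically combined into a seq2seq model via `EncoderDecoderModel`; weights are
# fetched from the Hub, so the snippet is left commented out.
# from transformers import EncoderDecoderModel
# encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# decoder = BertGenerationDecoder.from_pretrained(
#     "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
# )
# model = EncoderDecoderModel(encoder=encoder, decoder=decoder)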
"""simple docstring"""
def is_palindrome(head) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
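
# The three checks above assume singly linked list nodes exposing `.val` and
# `.next`; no node class is defined in this file, so the minimal definition
# below is a hypothetical addition for trying them out.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_linked_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


assert is_palindrome_stack(build_linked_list([1, 2, 2, 1]))
assert not is_palindrome_dict(build_linked_list([1, 2, 3]))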
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
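
# A toy illustration (an addition) of the key renaming performed above.
_example_state = {"decoder.layers.3.moe_layer.experts.0.fc1.weight": torch.zeros(1)}
assert list(rename_fairseq_keys(_example_state, expert_idx=7)) == [
    "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
]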
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """simple docstring"""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """simple docstring"""

    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> int:
if isinstance(_SCREAMING_SNAKE_CASE ,(Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def a__ ( self ,_SCREAMING_SNAKE_CASE = True ) -> Optional[Any]:
UpperCAmelCase_ : Dict = use_tiling
def a__ ( self ) -> Optional[Any]:
self.enable_tiling(_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : str = True
def a__ ( self ) -> Any:
UpperCAmelCase_ : Any = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : int = {}
def fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
UpperCAmelCase_ : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
module.set_processor(_SCREAMING_SNAKE_CASE )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for name, module in self.named_children():
fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : int = [self.encoder(_SCREAMING_SNAKE_CASE ) for x_slice in x.split(1 )]
UpperCAmelCase_ : List[str] = torch.cat(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Optional[Any] = self.encoder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = self.quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.decoder(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
@apply_forward_hook
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : Tuple = [self._decode(_SCREAMING_SNAKE_CASE ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : str = torch.cat(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : str = self._decode(_SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : str = min(a.shape[2] ,b.shape[2] ,_SCREAMING_SNAKE_CASE )
for y in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Any = min(a.shape[3] ,b.shape[3] ,_SCREAMING_SNAKE_CASE )
for x in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Optional[int] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Any = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : str = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : Tuple = []
for i in range(0 ,x.shape[2] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = []
for j in range(0 ,x.shape[3] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : int = self.encoder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.quant_conv(_SCREAMING_SNAKE_CASE )
row.append(_SCREAMING_SNAKE_CASE )
rows.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) )
UpperCAmelCase_ : Optional[Any] = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 )
UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : str = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Any = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Tuple = []
for i in range(0 ,z.shape[2] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = []
for j in range(0 ,z.shape[3] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : List[str] = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.decoder(_SCREAMING_SNAKE_CASE )
row.append(_SCREAMING_SNAKE_CASE )
rows.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
                # blend the tile above and the tile to the left into the
                # current tile, then append the current tile to the result row
if i > 0:
UpperCAmelCase_ : Optional[int] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if j > 0:
UpperCAmelCase_ : Optional[int] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = sample
UpperCAmelCase_ : Optional[Any] = self.encode(_SCREAMING_SNAKE_CASE ).latent_dist
if sample_posterior:
UpperCAmelCase_ : List[Any] = posterior.sample(generator=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Union[str, Any] = posterior.mode()
UpperCAmelCase_ : List[Any] = self.decode(_SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
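# NOTE: a minimal, standalone sketch (not part of the original file) of the
# linear crossfade that blend_v / blend_h above perform between neighbouring
# tiles; it assumes 4-D tensors of shape (batch, channels, height, width).
import torch

def blend_vertical(top: torch.Tensor, bottom: torch.Tensor, blend_extent: int) -> torch.Tensor:
    """Fade the last rows of `top` into the first rows of `bottom` in place."""
    blend_extent = min(top.shape[2], bottom.shape[2], blend_extent)
    for y in range(blend_extent):
        weight = y / blend_extent  # 0 at the seam edge of `top`, approaching 1 inside `bottom`
        bottom[:, :, y, :] = top[:, :, -blend_extent + y, :] * (1 - weight) + bottom[:, :, y, :] * weight
    return bottom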
| 235
| 0
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowercase ( self : str ) -> List[Any]:
lowercase_ = self.dummy_uncond_unet
lowercase_ = KarrasVeScheduler()
lowercase_ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' , return_dict=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : int ) -> int:
lowercase_ = '''google/ncsnpp-celebahq-256'''
lowercase_ = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowercase_ = KarrasVeScheduler()
lowercase_ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
lowercase_ = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
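# NOTE: a self-contained sketch (not from the original file) of the
# lazy-import pattern used above -- submodules are only imported when one of
# their attributes is first accessed, then cached on the module object. It
# assumes `name` is an importable package.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value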
| 30
| 1
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowercase__ :Optional[Any] = logging.get_logger(__name__)
class lowercase ( snake_case__ ):
def __init__( self ,*A__ ,**A__):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' ,UpperCAmelCase_ ,)
super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_)
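# NOTE: a generic sketch (not from the original file) of the deprecation
# pattern used above -- the old class subclasses its replacement and emits a
# FutureWarning on construction; the class names here are illustrative only.
import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)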
| 365
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Dict = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowercase__ :Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97
| 0
|
import operator as op
_a = '''scaler.pt'''
_a = '''pytorch_model'''
_a = '''random_states'''
_a = '''optimizer'''
_a = '''scheduler'''
_a = '''pytorch_model.bin'''
_a = '''pytorch_model.bin.index.json'''
_a = '''model.safetensors'''
_a = '''model.safetensors.index.json'''
_a = '''1.10.2'''
_a = '''py38'''
_a = '''4.17.0'''
_a = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
_a = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
_a = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
_a = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
_a = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
_a = '''2.0.1'''
_a = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
_a = ['''default''', '''reduce-overhead''', '''max-autotune''']
_a = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_a = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
_a = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
_a = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
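# NOTE: an illustrative sketch (not from the original file) of how the
# string-to-operator map above is typically used to evaluate version
# constraints; the `packaging` dependency is assumed to be installed.
import operator as op
from packaging.version import parse

STR_OP_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(current: str, operation: str, reference: str) -> bool:
    # e.g. compare_versions("2.0.1", ">=", "1.10.2") -> True
    return STR_OP_TO_FUNC[operation](parse(current), parse(reference))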
| 322
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Tuple = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: int , **UpperCamelCase__: Tuple ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : str = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
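# NOTE: a hypothetical, minimal sketch (not from the original file) of the
# composition the tests above exercise -- a dual-encoder processor simply
# routes text to the tokenizer and images to the image processor and merges
# the two mappings; `MiniProcessor` is an illustrative name.
class MiniProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text, **kwargs))
        if images is not None:
            outputs.update(self.image_processor(images, **kwargs))
        return outputs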
| 41
| 0
|
from typing import Any
import numpy as np
def _snake_case ( lowercase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
return np.array_equal(lowerCAmelCase__ , matrix.conjugate().T )
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Any = v.conjugate().T
lowerCAmelCase_ :Any = v_star.dot(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , np.ndarray )
return (v_star_dot.dot(lowerCAmelCase__ )) / (v_star.dot(lowerCAmelCase__ ))
def _snake_case ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
lowerCAmelCase_ :Any = np.array([[1], [2], [3]] )
assert is_hermitian(lowerCAmelCase__ ), f"""{a} is not hermitian."""
print(rayleigh_quotient(lowerCAmelCase__ , lowerCAmelCase__ ) )
lowerCAmelCase_ :int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowerCAmelCase__ ), f"""{a} is not hermitian."""
assert rayleigh_quotient(lowerCAmelCase__ , lowerCAmelCase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
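# NOTE: an added numpy check (not from the original file) of the property the
# code above relies on -- for a Hermitian matrix M and non-zero vector v, the
# Rayleigh quotient R(M, v) = (v* M v) / (v* v) is real and lies between the
# smallest and largest eigenvalues of M.
import numpy as np

M = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]], dtype=float)  # real symmetric => Hermitian
v = np.array([[1.0], [2.0], [3.0]])
rayleigh = ((v.T @ M @ v) / (v.T @ v)).item()  # evaluates to 3.0, matching the test above
eigenvalues = np.linalg.eigvalsh(M)  # ascending order for Hermitian input
assert eigenvalues[0] <= rayleigh <= eigenvalues[-1]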
| 358
|
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
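# NOTE: a worked example (not from the original file) of the relation
# sigma = q * n * mu that the function above solves for; the numbers are
# rough textbook values for copper.
ELECTRON_CHARGE = 1.6021e-19  # C
sigma = 5.8e7                 # conductivity, S/m
n = 8.5e28                    # electron concentration, 1/m^3
mu = sigma / (n * ELECTRON_CHARGE)  # mobility, m^2/(V*s)
print(f"mobility ~= {mu:.3e} m^2/(V*s)")  # about 4.3e-3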
| 1
| 0
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : int = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowerCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self : Any ):
torch.manual_seed(0 )
_a : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
_a : Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
torch.manual_seed(0 )
_a : List[str] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=_UpperCAmelCase ,set_alpha_to_one=_UpperCAmelCase ,)
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : List[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_a : str = CLIPTextModel(_UpperCAmelCase )
_a : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : Optional[Any] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : int = 2
_a : str = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,)
_a : int = floats_tensor(control_image.shape ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : List[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_a : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __lowercase ( self : Tuple ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowercase ( self : Dict ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : List[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowercase ( self : Tuple ):
torch.manual_seed(0 )
_a : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
def init_weights(_UpperCAmelCase : int ):
if isinstance(_UpperCAmelCase ,torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
_a : Union[str, Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_UpperCAmelCase )
torch.manual_seed(0 )
_a : Union[str, Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_UpperCAmelCase )
torch.manual_seed(0 )
_a : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=_UpperCAmelCase ,set_alpha_to_one=_UpperCAmelCase ,)
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_a : Optional[int] = CLIPTextModel(_UpperCAmelCase )
_a : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : str = MultiControlNetModel([controlneta, controlneta] )
_a : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : int=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : List[str] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : str = 2
_a : Union[str, Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,),
]
_a : str = floats_tensor(control_image[0].shape ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : Optional[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_a : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __lowercase ( self : Optional[int] ):
_a : Dict = self.get_dummy_components()
_a : List[str] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
_a : Any = 10.0
_a : Optional[Any] = 4
_a : Tuple = self.get_dummy_inputs(_UpperCAmelCase )
_a : Optional[Any] = steps
_a : Optional[int] = scale
_a : Any = pipe(**_UpperCAmelCase )[0]
_a : Any = self.get_dummy_inputs(_UpperCAmelCase )
_a : Tuple = steps
_a : List[str] = scale
_a : List[Any] = pipe(**_UpperCAmelCase ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0]
_a : str = self.get_dummy_inputs(_UpperCAmelCase )
_a : Optional[Any] = steps
_a : int = scale
_a : List[str] = pipe(**_UpperCAmelCase ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0]
_a : str = self.get_dummy_inputs(_UpperCAmelCase )
_a : int = steps
_a : str = scale
_a : int = pipe(**_UpperCAmelCase ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowercase ( self : Optional[Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowercase ( self : Optional[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowercase ( self : List[str] ):
_a : Optional[int] = self.get_dummy_components()
_a : Dict = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_UpperCAmelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Union[str, Any] ):
_a : Tuple = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
_a : Tuple = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,safety_checker=_UpperCAmelCase ,controlnet=_UpperCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
_a : Union[str, Any] = 'evil space-punk bird'
_a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
_a : Optional[int] = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
_a : Optional[Any] = pipe(
_UpperCAmelCase ,_UpperCAmelCase ,control_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,output_type='np' ,num_inference_steps=50 ,strength=0.6 ,)
_a : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
_a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2
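# NOTE: a schematic sketch (not from the original file) of what the
# control_guidance_start / control_guidance_end arguments exercised above
# express -- the ControlNet conditioning is only kept while the denoising
# progress lies inside the given window (the real pipeline computes
# per-step keep scales rather than a boolean).
def controlnet_active(step: int, total_steps: int, start: float, end: float) -> bool:
    progress = step / total_steps
    return start <= progress < end

active_steps = [s for s in range(50) if controlnet_active(s, 50, 0.1, 0.7)]
assert active_steps[0] == 5 and active_steps[-1] == 34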
| 89
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303
| 0
|
'''simple docstring'''
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase ) -> None:
_snake_case = set_counts
_snake_case = max(UpperCAmelCase )
_snake_case = len(UpperCAmelCase )
_snake_case = [1] * num_sets
_snake_case = list(range(UpperCAmelCase ) )
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> bool:
_snake_case = self.get_parent(UpperCAmelCase )
_snake_case = self.get_parent(UpperCAmelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_snake_case = 0
_snake_case = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_snake_case = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_snake_case = 0
_snake_case = src_parent
_snake_case = self.set_counts[src_parent]
_snake_case = max(self.max_set , UpperCAmelCase )
return True
def lowercase (self , UpperCAmelCase ) -> int:
if self.parents[disj_set] == disj_set:
return disj_set
_snake_case = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
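# NOTE: a readable, self-contained rewrite (not from the original file) of the
# weighted disjoint-set above: union by rank, path compression, and tracking
# of the largest set size after each merge.
class WeightedDisjointSet:
    def __init__(self, set_counts: list[int]) -> None:
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined)
        return True

    def get_parent(self, node: int) -> int:
        if self.parents[node] == node:
            return node
        self.parents[node] = self.get_parent(self.parents[node])  # path compression
        return self.parents[node]

ds = WeightedDisjointSet([1, 1, 1])
assert ds.merge(0, 1) and ds.merge(1, 2)
assert ds.max_set == 3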
| 351
|
'''simple docstring'''
from math import sqrt
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = 0
for i in range(1 , int(sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) ):
if n % i == 0 and i != sqrt(_SCREAMING_SNAKE_CASE ):
total += i + n // i
elif i == sqrt(_SCREAMING_SNAKE_CASE ):
total += i
return total - n
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE = 1_0000 ):
_snake_case = sum(
i
for i in range(1 , _SCREAMING_SNAKE_CASE )
if sum_of_divisors(sum_of_divisors(_SCREAMING_SNAKE_CASE ) ) == i and sum_of_divisors(_SCREAMING_SNAKE_CASE ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
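# NOTE: a quick check (not from the original file) of the proper-divisor sum
# that solution() above is built on, using the classic amicable pair
# (220, 284) and a self-contained helper.
from math import sqrt

def proper_divisor_sum(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n)) + 1):
        if n % i == 0:
            total += i
            if i != n // i:
                total += n // i
    return total - n

# Each member maps to the other, so both satisfy d(d(n)) == n with d(n) != n
# and are counted by solution(); together they contribute 220 + 284 = 504.
assert proper_divisor_sum(220) == 284
assert proper_divisor_sum(284) == 220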
| 270
| 0
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_a = 'CompVis/stable-diffusion-v1-1'
_a = 'CompVis/stable-diffusion-v1-2'
_a = 'CompVis/stable-diffusion-v1-3'
_a = 'CompVis/stable-diffusion-v1-4'
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ):
        super().__init__()
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
@property
def _lowercase ( self : List[str] ):
return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )}
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
__lowercase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(UpperCAmelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 17
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "roformer"
def __init__( self , _a=5_0_0_0_0 , _a=None , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_5_3_6 , _a=2 , _a=0.02 , _a=1e-1_2 , _a=0 , _a=False , _a=True , **_a , ) -> List[str]:
super().__init__(pad_token_id=_a , **_a )
_a : Tuple = vocab_size
_a : List[Any] = hidden_size if embedding_size is None else embedding_size
_a : Any = hidden_size
_a : Any = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : str = hidden_act
_a : Any = intermediate_size
_a : Dict = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : str = max_position_embeddings
_a : Dict = type_vocab_size
_a : List[Any] = initializer_range
_a : Dict = layer_norm_eps
_a : Dict = rotary_value
_a : Dict = use_cache
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a : List[Any] = {0: '''batch''', 1: '''sequence'''}
_a : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 235
| 0
|
"""simple docstring"""
def snake_case (A_ :int ):
'''simple docstring'''
a : List[Any] = int(A_ )
if n_element < 1:
a : Dict = ValueError('a should be a positive number' )
raise my_error
a : int = [1]
    a, a, a = (0, 0, 0)
a : Dict = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_UpperCamelCase : int = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_UpperCamelCase : Union[str, Any] = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
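# NOTE: a readable variant (not from the original file) of the three-pointer
# merge above -- every Hamming number is 2^i * 3^j * 5^k, and each new term is
# the smallest unseen multiple of an earlier term by 2, 3 or 5.
def hamming_numbers(count: int) -> list[int]:
    hamming = [1]
    i = j = k = 0  # pointers into `hamming` for the 2x, 3x and 5x candidates
    while len(hamming) < count:
        next_term = min(hamming[i] * 2, hamming[j] * 3, hamming[k] * 5)
        hamming.append(next_term)
        # advance every pointer that produced the chosen term (avoids duplicates)
        if next_term == hamming[i] * 2:
            i += 1
        if next_term == hamming[j] * 3:
            j += 1
        if next_term == hamming[k] * 5:
            k += 1
    return hamming

assert hamming_numbers(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]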
| 186
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_UpperCamelCase : int = 'docs/source/en/_toctree.yml'
def snake_case (A_ :Optional[Any] ):
'''simple docstring'''
a : List[Any] = defaultdict(A_ )
for doc in model_doc:
counts[doc["local"]] += 1
a : Optional[Any] = [key for key, value in counts.items() if value > 1]
a : List[str] = []
for duplicate_key in duplicates:
a : int = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(A_ ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(A_ , key=lambda A_ : s["title"].lower() )
def snake_case (A_ :List[str]=False ):
'''simple docstring'''
with open(A_ , encoding='utf-8' ) as f:
a : Dict = yaml.safe_load(f.read() )
# Get to the API doc
a : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
a : List[str] = content[api_idx]['sections']
# Then to the model doc
a : Optional[int] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
a : Optional[Any] = api_doc[model_idx]['sections']
a : Dict = [(idx, section) for idx, section in enumerate(A_ ) if 'sections' in section]
a : List[str] = False
for idx, modality_doc in modalities_docs:
a : str = modality_doc['sections']
a : str = clean_model_doc_toc(A_ )
if old_modality_doc != new_modality_doc:
a : str = True
if overwrite:
a : Any = new_modality_doc
if diff:
if overwrite:
a : Any = model_doc
a : str = api_doc
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(A_ , allow_unicode=A_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_UpperCamelCase : Any = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
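# NOTE: a minimal standalone sketch (not from the original file) of the core
# of clean_model_doc_toc above -- collapse duplicate `local` keys, then sort
# by title. The real script additionally raises if duplicate keys carry
# different titles.
toc = [
    {"local": "bert", "title": "BERT"},
    {"local": "albert", "title": "ALBERT"},
    {"local": "bert", "title": "BERT"},
]
deduped = list({entry["local"]: entry for entry in toc}.values())  # last wins per key
deduped.sort(key=lambda entry: entry["title"].lower())
assert [entry["local"] for entry in deduped] == ["albert", "bert"]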
| 186
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )}
| 97
| 0
|
"""simple docstring"""
def _lowerCamelCase( a ):
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
__a = sum(a ) / len(a ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(a )
if __name__ == "__main__":
import doctest
doctest.testmod()
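# NOTE: a quick worked check (not from the original file): for [1, 2, 3, 4]
# the mean is 2.5, so the mean absolute deviation is
# (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
values = [1, 2, 3, 4]
mean = sum(values) / len(values)
assert sum(abs(x - mean) for x in values) / len(values) == 1.0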
| 357
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:int = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : List[Any] = """focalnet"""
def __init__( self , lowerCamelCase=224 , lowerCamelCase=4 , lowerCamelCase=3 , lowerCamelCase=96 , lowerCamelCase=False , lowerCamelCase=[192, 384, 768, 768] , lowerCamelCase=[2, 2, 6, 2] , lowerCamelCase=[2, 2, 2, 2] , lowerCamelCase=[3, 3, 3, 3] , lowerCamelCase="gelu" , lowerCamelCase=4.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=False , lowerCamelCase=1E-4 , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=0.02 , lowerCamelCase=1E-5 , lowerCamelCase=32 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = use_conv_embed
__a = hidden_sizes
__a = depths
__a = focal_levels
__a = focal_windows
__a = hidden_act
__a = mlp_ratio
__a = hidden_dropout_prob
__a = drop_path_rate
__a = use_layerscale
__a = layerscale_value
__a = use_post_layernorm
__a = use_post_layernorm_in_modulation
__a = normalize_modulator
__a = initializer_range
__a = layer_norm_eps
__a = encoder_stride
__a = ["stem"] + [F"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
| 268
| 0
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase :
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :int = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ :int = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase__ :Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase__ :Tuple = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCamelCase__ :str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.get_dummy_components()
UpperCamelCase__ :List[Any] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :List[str] = inputs['''prompt''']
UpperCamelCase__ :Tuple = inputs['''generator''']
UpperCamelCase__ :Optional[Any] = inputs['''num_inference_steps''']
UpperCamelCase__ :List[str] = inputs['''output_type''']
if "image" in inputs:
UpperCamelCase__ :Optional[int] = inputs['''image''']
else:
UpperCamelCase__ :Optional[Any] = None
if "mask_image" in inputs:
UpperCamelCase__ :List[Any] = inputs['''mask_image''']
else:
UpperCamelCase__ :Union[str, Any] = None
if "original_image" in inputs:
UpperCamelCase__ :Optional[Any] = inputs['''original_image''']
else:
UpperCamelCase__ :Optional[Any] = None
UpperCamelCase__ , UpperCamelCase__ :Tuple = pipe.encode_prompt(UpperCamelCase_ )
# inputs with prompt converted to embeddings
UpperCamelCase__ :List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ :List[str] = image
if mask_image is not None:
UpperCamelCase__ :List[Any] = mask_image
if original_image is not None:
UpperCamelCase__ :Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Dict = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase_ , UpperCamelCase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCamelCase__ :List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = inputs['''generator''']
UpperCamelCase__ :Optional[int] = inputs['''num_inference_steps''']
UpperCamelCase__ :Any = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCamelCase__ :List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ :Dict = image
if mask_image is not None:
UpperCamelCase__ :int = mask_image
if original_image is not None:
UpperCamelCase__ :Optional[int] = original_image
UpperCamelCase__ :str = pipe_loaded(**UpperCamelCase_ )[0]
UpperCamelCase__ :Optional[Any] = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max()
self.assertLess(UpperCamelCase_ , 1e-4 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ :List[str] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase__ :Tuple = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :int = pipe_loaded(**UpperCamelCase_ )[0]
UpperCamelCase__ :Tuple = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max()
self.assertLess(UpperCamelCase_ , 1e-4 )
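# Both tests above exercise the same save/load round-trip; a minimal sketch of
# the pattern for any diffusers pipeline (names below are placeholders):
#
#   output_before = pipe(**inputs)[0]
#   with tempfile.TemporaryDirectory() as tmpdir:
#       pipe.save_pretrained(tmpdir)
#       pipe_loaded = PipelineClass.from_pretrained(tmpdir)
#   output_after = pipe_loaded(**inputs)[0]
#   assert np.abs(to_np(output_before) - to_np(output_after)).max() < 1e-4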
| 97
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
global process_lock
    # we perform n swaps since after n swaps we know we are sorted; n is
    # fixed at 10 here to match the length of the example list built in main()
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase_ = min(snake_case_ , snake_case_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase_ = max(snake_case_ , snake_case_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
for i in range(1 , len(snake_case_ ) - 1 ):
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
process_array_.append(
Process(
target=snake_case_ , args=(
len(snake_case_ ) - 1,
arr[len(snake_case_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case_ ) ):
UpperCAmelCase_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*snake_case_ )
UpperCAmelCase_ = odd_even_transposition(snake_case_ )
print("Sorted List\n" )
print(*snake_case_ )
if __name__ == "__main__":
main()
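# A minimal single-process sketch of the same odd-even transposition idea
# (an illustrative helper, separate from the multiprocessing version above):
# even phases compare pairs (0,1), (2,3), ...; odd phases compare (1,2),
# (3,4), ...; after n phases a list of length n is guaranteed sorted.
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        for j in range(phase % 2, n - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr

assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))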
| 1
| 0
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase__ : Tuple = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : Tuple = torch.permute(_lowerCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCamelCase ):
# linear layer
lowerCamelCase__ : List[str] = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : Dict = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if "metadata" in layer:
lowerCamelCase__ : Tuple = layer.split('metadata' )
lowerCamelCase__ : str = ''.join(split_layer[0] )[:-1]
lowerCamelCase__ : Dict = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
lowerCamelCase__ : List[str] = layer.split('kvstore' )
lowerCamelCase__ : Any = ''.join(split_layer[0] )[:-1]
lowerCamelCase__ : List[Any] = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
lowerCamelCase__ : Union[str, Any] = layer.split('/' )
lowerCamelCase__ : str = '/'.join(split_layer[:-1] )
lowerCamelCase__ : Tuple = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase__ : Dict = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
lowerCamelCase__ : Optional[Any] = 'file'
else:
lowerCamelCase__ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = rename_keys(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = {}
for k, v in current_block.items():
lowerCamelCase__ : int = v
lowerCamelCase__ : List[str] = new_current_block
torch.save(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ):
lowerCamelCase__ : Dict = convert_file_size_to_int(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Optional[Any] = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
lowerCamelCase__ : int = serialization.msgpack_restore(fp.read() )['optimizer']['target']
lowerCamelCase__ : Optional[int] = flatten_dict(_lowerCamelCase , sep='/' )
lowerCamelCase__ : Dict = {}
for layer in checkpoint_info.keys():
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = get_key_and_tensorstore_dict(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if curr_real_layer_name in all_layers:
lowerCamelCase__ : int = content
else:
lowerCamelCase__ : Tuple = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase__ : Tuple = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase__ : Union[str, Any] = torch.tensor(_lowerCamelCase )
lowerCamelCase__ : List[str] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase__ , lowerCamelCase__ : str = rename_base_flax_keys(tuple(key.split('/' ) ) , _lowerCamelCase )
lowerCamelCase__ : List[str] = '/'.join(_lowerCamelCase )
        # If this weight is going to tip over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase__ : Tuple = os.path.join(
_lowerCamelCase , weights_name.replace('.bin' , f'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = raw_weights.to(getattr(_lowerCamelCase , _lowerCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase__ : str = os.path.join(_lowerCamelCase , weights_name.replace('.bin' , f'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase__ : str = {}
lowerCamelCase__ : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
        lowerCamelCase__ : str = weights_name.replace(
            '.bin' , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Optional[int] = shard
for key in shard:
lowerCamelCase__ : int = shard_file
# Add the metadata
lowerCamelCase__ : Tuple = {'total_size': total_size}
lowerCamelCase__ : List[Any] = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , 'w' , encoding='utf-8' ) as f:
lowerCamelCase__ : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + '\n'
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
A_ : List[Any] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCamelCase_ ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase__ : Optional[int] = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
lowerCamelCase__ : Dict = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
lowerCamelCase__ : List[str] = TaTokenizer.from_pretrained('t5-small' )
lowerCamelCase__ : int = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
lowerCamelCase__ : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors='pt' ).input_ids
lowerCamelCase__ : Union[str, Any] = model.generate(_lowerCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
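# Minimal sketch of the size-based sharding rule used in the conversion above
# (an illustrative helper, not part of the script): weights accumulate in the
# current shard until adding one more would exceed max_shard_size, at which
# point a new shard is started.
def split_into_shards(weight_sizes, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, size in weight_sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)
    return shards

assert split_into_shards([("a", 6), ("b", 6), ("c", 3)], 10) == [["a"], ["b", "c"]]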
| 316
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( vector ):
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
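# The expression above is an algebraic form of the hyperbolic tangent,
# (2 / (1 + e^(-2x))) - 1 == tanh(x); a quick illustrative check against
# numpy's built-in:
_x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
assert np.allclose(lowerCamelCase_(_x), np.tanh(_x))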
| 316
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __lowerCamelCase ( __lowercase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def _UpperCAmelCase ( __UpperCAmelCase ) -> str:
raise NotImplementedError()
@abstractmethod
def _UpperCAmelCase ( self ) -> Union[str, Any]:
raise NotImplementedError()
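# Upstream this abstract base is `BaseTransformersCLICommand`, whose two hooks
# are a static `register_subcommand(parser)` and an instance `run()`; a
# minimal sketch of a concrete command (names below are assumptions, since the
# method names above are mangled):
#
#   class EchoCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser):
#           parser.add_parser("echo")
#       def run(self):
#           print("echo")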
| 320
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE__ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__lowerCamelCase = Text(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : int ) -> Dict:
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
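# Illustrative usage: upstream this reader backs the "text" builder and is
# normally reached through the public API (a sketch, since the method names
# in this snippet are mangled):
#
#   from datasets import load_dataset
#   dataset = load_dataset("text", data_files="data.txt", split="train")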
| 270
| 0
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader (yaml.SafeLoader ):
    """simple docstring"""
    def _check_no_duplicates_on_constructed_node ( self , node ):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
    def construct_mapping ( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme ( readme_content ):
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata (dict ):
    """simple docstring"""
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme ( cls , path ) -> "DatasetMetadata":
        with open(path , encoding="utf-8" ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme ( self , path ):
        if path.exists():
            with open(path , encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , "w" , encoding="utf-8" ) as readme_file:
            readme_file.write(full_content )
    def _to_readme ( self , readme_content = None ) -> str:
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string ( cls , string ) -> "DatasetMetadata":
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string ( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="utf-8" , ).decode("utf-8" )
_snake_case : List[Any] = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
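# Minimal usage sketch of `_split_yaml_from_readme` above: it separates the
# YAML block between the leading "---" fences from the markdown body.
yaml_block, body = _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset\n")
assert yaml_block == "license: mit"
assert body == "# My dataset"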
| 134
|
import numpy
class TwoHiddenLayerNeuralNetwork :
"""simple docstring"""
def __init__( self : str , lowerCamelCase : numpy.ndarray , lowerCamelCase : numpy.ndarray ) -> None:
__snake_case : Any = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is
        # the number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
__snake_case : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__snake_case : Optional[int] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__snake_case : int = numpy.random.rand(3 , 1 )
# Real output values provided.
__snake_case : Optional[Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__snake_case : Optional[int] = numpy.zeros(output_array.shape )
def __snake_case ( self : List[Any] ) -> numpy.ndarray:
__snake_case : List[str] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__snake_case : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__snake_case : str = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __snake_case ( self : Union[str, Any] ) -> None:
__snake_case : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__snake_case : Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__snake_case : Optional[Any] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __snake_case ( self : List[str] , lowerCamelCase : numpy.ndarray , lowerCamelCase : int , lowerCamelCase : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
__snake_case : Any = self.feedforward()
self.back_propagation()
if give_loss:
__snake_case : str = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'Iteration {iteration} Loss: {loss}' )
def __snake_case ( self : Optional[Any] , lowerCamelCase : numpy.ndarray ) -> int:
__snake_case : Any = input_arr
__snake_case : List[str] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__snake_case : List[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__snake_case : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid ( value ):
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative ( value ):
    return (value) * (1 - (value))
def example ( ):
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
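# Quick numeric check of the identity used throughout backpropagation above:
# for s = sigmoid(x), ds/dx = s * (1 - s). Illustrative only.
_x = 0.3
_s = sigmoid(_x)
_numeric = (sigmoid(_x + 1e-6) - _s) / 1e-6
assert abs(_numeric - sigmoid_derivative(_s)) < 1e-4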
| 134
| 1
|
import torch
from diffusers import DiffusionPipeline
class _lowerCamelCase ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet , scheduler )->Union[str, Any]:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self )->List[str]:
        '''simple docstring'''
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # an extra, deterministic step so the custom pipeline's output is all ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
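# Illustrative sanity check of the pipeline above (a sketch; it needs small
# dummy `unet`/`scheduler` modules such as diffusers' test fixtures):
#
#   pipe = _lowerCamelCase(unet=dummy_unet, scheduler=dummy_scheduler)
#   result = pipe()
#   assert torch.all(result == 1.0)  # __call__ above always returns ones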
| 186
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _lowerCamelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = "glpn"
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
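# Illustrative instantiation of the config above (its class name is mangled to
# `_lowerCamelCase` in this file; upstream it is `transformers.GLPNConfig`):
#
#   config = _lowerCamelCase()
#   assert config.model_type == "glpn" and len(config.depths) == config.num_encoder_blocks == 4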
| 186
| 1
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def snake_case_ ( lowerCAmelCase_ : List[str] ):
__lowercase : List[Any] = SwinConfig()
__lowercase : Union[str, Any] = swin_name.split("""_""" )
__lowercase : Tuple = name_split[1]
__lowercase : int = int(name_split[4] )
__lowercase : Optional[Any] = int(name_split[3][-1] )
if model_size == "tiny":
__lowercase : Optional[Any] = 96
__lowercase : Tuple = (2, 2, 6, 2)
__lowercase : str = (3, 6, 12, 24)
elif model_size == "small":
__lowercase : Optional[Any] = 96
__lowercase : Optional[Any] = (2, 2, 18, 2)
__lowercase : Any = (3, 6, 12, 24)
elif model_size == "base":
__lowercase : List[Any] = 128
__lowercase : str = (2, 2, 18, 2)
__lowercase : List[str] = (4, 8, 16, 32)
else:
__lowercase : Union[str, Any] = 192
__lowercase : Optional[int] = (2, 2, 18, 2)
__lowercase : Optional[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
__lowercase : List[str] = 21841
else:
__lowercase : Any = 1000
__lowercase : Optional[Any] = """huggingface/label-files"""
__lowercase : Optional[Any] = """imagenet-1k-id2label.json"""
__lowercase : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__lowercase : Dict = idalabel
__lowercase : int = {v: k for k, v in idalabel.items()}
__lowercase : Union[str, Any] = img_size
__lowercase : Union[str, Any] = num_classes
__lowercase : List[Any] = embed_dim
__lowercase : str = depths
__lowercase : int = num_heads
__lowercase : List[str] = window_size
return config
def snake_case_ ( lowerCAmelCase_ : Any ):
if "patch_embed.proj" in name:
__lowercase : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowercase : Tuple = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__lowercase : str = """encoder.""" + name
if "attn.proj" in name:
__lowercase : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowercase : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase : int = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
__lowercase : List[str] = """layernorm.weight"""
if name == "norm.bias":
__lowercase : Any = """layernorm.bias"""
if "head" in name:
__lowercase : List[Any] = name.replace("""head""" , """classifier""" )
else:
__lowercase : str = """swin.""" + name
return name
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
for key in orig_state_dict.copy().keys():
__lowercase : Optional[int] = orig_state_dict.pop(lowerCamelCase_ )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase : Union[str, Any] = key.split(""".""" )
__lowercase : str = int(key_split[1] )
__lowercase : Optional[int] = int(key_split[3] )
__lowercase : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase : List[Any] = val[:dim, :]
__lowercase : List[str] = val[
dim : dim * 2, :
]
__lowercase : int = val[-dim:, :]
else:
__lowercase : Dict = val[
:dim
]
__lowercase : List[str] = val[
dim : dim * 2
]
__lowercase : str = val[
-dim:
]
else:
__lowercase : Optional[Any] = val
return orig_state_dict
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] ):
__lowercase : int = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ )
timm_model.eval()
__lowercase : Any = get_swin_config(lowerCamelCase_ )
__lowercase : Any = SwinForImageClassification(lowerCamelCase_ )
model.eval()
__lowercase : Dict = convert_state_dict(timm_model.state_dict() , lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
__lowercase : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : str = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
__lowercase : int = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
__lowercase : Any = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" )
__lowercase : List[str] = timm_model(inputs["""pixel_values"""] )
__lowercase : List[str] = model(**lowerCamelCase_ ).logits
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
print(F"Saving model {swin_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase : int = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
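# Minimal sketch of the qkv split rule applied in the state-dict conversion
# above: a fused (3 * dim, dim) projection is cut into equal thirds for the
# query, key and value weights (illustrative, self-contained):
import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)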
| 358
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
__lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
__lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
else:
__lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
__lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
__lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""]
__lowercase : Optional[int] = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
__lowercase : Tuple = key.split(""".""" )
if attributes[0] == "lm_head":
__lowercase : str = prophet
__lowercase : List[str] = prophet_old
else:
__lowercase : Tuple = prophet.prophetnet
__lowercase : Union[str, Any] = prophet_old.model
__lowercase : Optional[Any] = False
for attribute in attributes:
if attribute in mapping:
__lowercase : Optional[int] = mapping[attribute]
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
__lowercase : str = attribute
elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : List[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowercase : Any = old_model.weight
logger.info(F"{attribute} is initialized." )
__lowercase : Any = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowercase : Dict = old_model.bias
logger.info(F"{attribute} is initialized" )
__lowercase : int = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ):
__lowercase : Dict = old_model.in_proj_weight.shape[0] // 3
__lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowercase : int = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowercase : int = True
break
if attribute.isdigit():
__lowercase : Tuple = model[int(lowerCAmelCase_ )]
__lowercase : int = old_model[int(lowerCAmelCase_ )]
else:
__lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if old_attribute == "":
__lowercase : int = old_model
else:
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError(F"{old_model} does not have {old_attribute}" )
__lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!" )
print(F"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : Any = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
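# Minimal sketch of the dotted-key traversal used above: each "."-separated
# component is resolved with getattr, and a purely numeric component indexes
# into a module list (illustrative stand-in objects, not transformers code):
class _Leaf:
    weight = "w"

class _Model:
    layers = [_Leaf()]

obj = _Model()
for attribute in "layers.0.weight".split("."):
    obj = obj[int(attribute)] if attribute.isdigit() else getattr(obj, attribute)
assert obj == "w"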
| 306
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __A :
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=[1, 1, 2] , a__=1 , a__=32 , a__=4 , a__=8 , a__=37 , a__="gelu_new" , a__=0.1 , a__=0.1 , a__=0.0 , a__=512 , a__=3 , a__=0.0_2 , a__=3 , a__=4 , a__=None , a__=False , ):
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : Optional[Any] = seq_length
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = block_sizes
_lowerCAmelCase : List[str] = num_decoder_layers
_lowerCAmelCase : List[Any] = d_model
_lowerCAmelCase : List[Any] = n_head
_lowerCAmelCase : Any = d_head
_lowerCAmelCase : Union[str, Any] = d_inner
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout
_lowerCAmelCase : Optional[int] = attention_dropout
_lowerCAmelCase : int = activation_dropout
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : Dict = num_choices
_lowerCAmelCase : Optional[int] = scope
_lowerCAmelCase : List[str] = initializer_std
# Used in the tests to check the size of the first attention layer
_lowerCAmelCase : Dict = n_head
# Used in the tests to check the size of the first hidden state
_lowerCAmelCase : Union[str, Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_lowerCAmelCase : List[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_lowerCAmelCase : List[Any] = self.num_hidden_layers + 2
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Dict = None
if self.use_input_mask:
_lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Tuple = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : List[str] = TFFunnelModel(config=a__ )
_lowerCAmelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : List[Any] = model(a__ )
_lowerCAmelCase : Tuple = [input_ids, input_mask]
_lowerCAmelCase : List[str] = model(a__ )
_lowerCAmelCase : Any = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCAmelCase : str = False
_lowerCAmelCase : List[str] = TFFunnelModel(config=a__ )
_lowerCAmelCase : Optional[int] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCAmelCase : int = False
_lowerCAmelCase : Tuple = TFFunnelModel(config=a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Optional[Any] = TFFunnelBaseModel(config=a__ )
_lowerCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Any = model(a__ )
_lowerCAmelCase : Optional[int] = [input_ids, input_mask]
_lowerCAmelCase : Any = model(a__ )
_lowerCAmelCase : Optional[int] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : int = TFFunnelBaseModel(config=a__ )
_lowerCAmelCase : List[str] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
_lowerCAmelCase : Any = False
_lowerCAmelCase : Optional[Any] = TFFunnelBaseModel(config=a__ )
_lowerCAmelCase : Dict = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Union[str, Any] = TFFunnelForPreTraining(config=a__ )
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Optional[int] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Any = TFFunnelForMaskedLM(config=a__ )
_lowerCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : List[str] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : List[str] = TFFunnelForSequenceClassification(config=a__ )
_lowerCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : List[Any] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : List[str] = self.num_choices
_lowerCAmelCase : Dict = TFFunnelForMultipleChoice(config=a__ )
_lowerCAmelCase : Any = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : int = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : str = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : List[str] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Tuple = self.num_labels
_lowerCAmelCase : List[Any] = TFFunnelForTokenClassification(config=a__ )
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Dict = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Tuple = TFFunnelForQuestionAnswering(config=a__ )
_lowerCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
_UpperCamelCase : int = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[str] = False
def __A ( self ):
_lowerCAmelCase : Dict = TFFunnelModelTester(self )
_lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=a__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[Any] = False
def __A ( self ):
_lowerCAmelCase : Tuple = TFFunnelModelTester(self , base=a__ )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a__ )
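# Note on the base-model shape checks above: the tester toggles two config
# flags before re-checking output shapes; in the upstream test these are
# `truncate_seq` and `separate_cls` (named here as an assumption, since the
# assignments in this file are mangled to bare `_lowerCAmelCase` targets).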
| 44
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''t5'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int:
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[Any] = d_model
UpperCAmelCase_ : str = d_kv
UpperCAmelCase_ : Any = d_ff
UpperCAmelCase_ : int = num_layers
UpperCAmelCase_ : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : Optional[Any] = num_heads
UpperCAmelCase_ : Any = relative_attention_num_buckets
UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance
UpperCAmelCase_ : Optional[Any] = dropout_rate
UpperCAmelCase_ : Tuple = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_factor
UpperCAmelCase_ : int = feed_forward_proj
UpperCAmelCase_ : str = use_cache
UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" )
UpperCAmelCase_ : List[Any] = act_info[-1]
UpperCAmelCase_ : Optional[int] = act_info[0] == "gated"
if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : int = "gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
class UpperCamelCase_ (__A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : Any = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence"
UpperCAmelCase_ : Union[str, Any] = {0: "batch"}
UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" )
return common_inputs
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return 13
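
# Editor's sketch: the `feed_forward_proj` parsing rule from T5Config.__init__ above,
# extracted as a standalone function purely for illustration (not part of the module's API).
def _parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":  # backwards-compatibility special case
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act


assert _parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
assert _parse_feed_forward_proj("relu") == ("relu", False)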
| 268
| 0
|
"""Minimum cut of a flow network via the Ford-Fulkerson method with BFS (Edmonds-Karp)."""

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if t is reachable from s in the residual graph; record the BFS tree in `parent`.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the saturated (min-cut) edges as (u, v) pairs.

    Note: `graph` is modified in place and holds the residual capacities on return.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
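    # Sanity check (editor's sketch): by the max-flow/min-cut theorem, the capacities
    # of the cut edges sum to the maximum s-t flow (23 for this network). `test_graph`
    # has already been turned into its residual network by the call above, so the
    # original capacities are rebuilt here.
    original = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    cut_edges = mincut([row[:] for row in original], source=0, sink=5)
    print("cut capacity:", sum(original[u][v] for u, v in cut_edges))  # -> 23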
| 280
|
"""Convert SwitchTransformers checkpoints from the original T5X repository to PyTorch."""

import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
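
# Worked example of the first regex pass above (editor's sketch; the key is
# hypothetical but follows the T5X naming scheme): "layers_{x}" becomes
# "block/{x}/layer", after which MOE_LAYER_NAME_MAPPING rewrites the component names.
_example = re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/mlp/wi/kernel")
assert _example == "encoder/block/3/layer/mlp/wi/kernel"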
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
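
# Example invocation (editor's sketch; all paths are placeholders):
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin \
#       --pytorch_dump_folder_path ./switch-converted \
#       --num_experts 8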
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 280
| 1
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename of basic JAX keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
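
# Quick illustration (editor's sketch; the key tuple is hypothetical): a 3-D expert
# kernel is renamed to "weight" and permuted from (num_experts, d_in, d_out) to
# (num_experts, d_out, d_in) so that each expert matches the PyTorch Linear layout.
_demo_key, _demo_tensor = rename_base_flax_keys(("mlp", "expert", "kernel"), torch.zeros(8, 4, 2))
assert _demo_key == ("mlp", "expert", "weight") and _demo_tensor.shape == (8, 2, 4)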
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
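
# Shape of the index produced above (editor's note, mirroring the standard HF sharded
# checkpoint convention): `metadata` holds the byte total and `weight_map` maps each
# parameter name to the shard file storing it, e.g.
#   {"metadata": {"total_size": 1024},
#    "weight_map": {"shared.weight": "pytorch_model-00001-of-00002.bin"}}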
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCamelCase : List[Any] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A ( ) -> List[str]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__UpperCamelCase = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
__UpperCamelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
__UpperCamelCase = TaTokenizer.from_pretrained('t5-small' )
__UpperCamelCase = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
__UpperCamelCase = tokenizer(snake_case , return_tensors='pt' ).input_ids
__UpperCamelCase = model.generate(snake_case , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 316
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 316
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
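
if __name__ == "__main__":
    # Editor's sketch with illustrative values only: a daily series forecast one week
    # ahead, with two static categorical features of cardinality 5 and 10.
    config = InformerConfig(
        prediction_length=7,
        context_length=14,
        num_static_categorical_features=2,
        cardinality=[5, 10],
    )
    print(config.d_model, config._number_of_features)  # -> 64 10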
| 361
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place using the cocktail shaker (bidirectional bubble) sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):  # backward pass: sink small items toward the front
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):  # forward pass: float large items toward the back
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 44
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed to leave exactly one coin per node."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
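    # A small worked example (editor's sketch): a root with 0 coins, a left child
    # with 3 and a right child with 0 needs three moves (two coins up from the left
    # child, then one down to the right child).
    example_tree = TreeNode(0, TreeNode(3), TreeNode(0))
    print(f"{distribute_coins(example_tree) = }")  # -> 3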
| 134
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RoFormer tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # make sure the custom Jieba pre-tokenizer is set right away in __init__
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # custom PreTokenizers cannot be pickled, so swap in a plain BertPreTokenizer
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 134
| 1
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that applies a registered accelerate hook to an arbitrary function rather
    than `forward`, e.g. `encode` and `decode` on an autoencoder.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
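
# Minimal usage sketch (editor's illustration; `_Dummy` is hypothetical): without an
# attached accelerate `_hf_hook`, the decorated method behaves exactly like the original.
class _Dummy:
    @apply_forward_hook
    def encode(self, x):
        return x


assert _Dummy().encode(41) == 41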
| 368
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 326
| 0
|