code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 346 |
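The row above relies on transformers' lazy-import pattern: `_import_structure` maps submodule names to their exported symbols, and `_LazyModule` defers the actual imports until an attribute is first accessed. A minimal sketch of that idea using PEP 562 module `__getattr__` (illustrative only; the real `_LazyModule` in `transformers.utils` also handles submodules, `dir()`, and pickling):

```python
# Minimal sketch of deferred imports, not transformers' actual _LazyModule.
import importlib

_import_structure = {"configuration_falcon": ["FalconConfig"]}


def __getattr__(name):  # called only when `name` is not found in the module
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```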
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16_000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81_920

        audio_slice = audio[77_230:77_240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81_920

        audio_slice = audio[27_780:27_790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 164 | 0 |
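A note on the magic number 256 in the fast tests above: the dummy vocoder is configured with a 16 kHz sampling rate, and the default dummy generation produces 0.016 s of audio, so the expected waveform length is 0.016 * 16000 = 256 samples (the `audio_length_in_s` test checks exactly this relationship):

```python
# Values taken from the dummy components above (SpeechT5HifiGanConfig(sampling_rate=16_000)).
sampling_rate = 16_000
audio_length_in_s = 0.016
assert int(audio_length_in_s * sampling_rate) == 256
```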
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
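`make_batched` normalizes three accepted input shapes to a batch of videos (a list of lists of frames). A quick illustration with numpy arrays standing in for images, assuming `make_batched` as defined above:

```python
import numpy as np

frame = np.zeros((16, 16, 3), dtype=np.uint8)

batch = make_batched(frame)               # single image -> one single-frame video
assert len(batch) == 1 and len(batch[0]) == 1

batch = make_batched([frame, frame])      # one video -> batch containing that video
assert len(batch) == 1 and len(batch[0]) == 2

batch = make_batched([[frame], [frame]])  # already a batch -> returned unchanged
assert len(batch) == 2
```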
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''pixel_values''']
def __init__( self : Dict , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 255 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : List[Any] , ) -> None:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = size if size is not None else {"shortest_edge": 256}
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , param_name="crop_size" )
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = offset
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : List[str] , ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(__magic_name__ , size["shortest_edge"] , default_to_square=__magic_name__ )
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE_ = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__magic_name__ , size=(size["height"], size["width"]) , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : str , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ) -> str:
SCREAMING_SNAKE_CASE_ = image.astype(np.floataa )
if offset:
SCREAMING_SNAKE_CASE_ = image - (scale / 2)
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : List[str] , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray:
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : List[str] , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = to_numpy_array(__magic_name__ )
if do_resize:
SCREAMING_SNAKE_CASE_ = self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ )
if do_center_crop:
SCREAMING_SNAKE_CASE_ = self.center_crop(__magic_name__ , size=__magic_name__ )
if do_rescale:
SCREAMING_SNAKE_CASE_ = self.rescale(image=__magic_name__ , scale=__magic_name__ , offset=__magic_name__ )
if do_normalize:
SCREAMING_SNAKE_CASE_ = self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ )
SCREAMING_SNAKE_CASE_ = to_channel_dimension_format(__magic_name__ , __magic_name__ )
return image
def __A ( self : Dict , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ = offset if offset is not None else self.offset
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , param_name="crop_size" )
if not valid_images(__magic_name__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
SCREAMING_SNAKE_CASE_ = make_batched(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [
[
self._preprocess_image(
image=__magic_name__ , do_resize=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , do_center_crop=__magic_name__ , crop_size=__magic_name__ , do_rescale=__magic_name__ , rescale_factor=__magic_name__ , offset=__magic_name__ , do_normalize=__magic_name__ , image_mean=__magic_name__ , image_std=__magic_name__ , data_format=__magic_name__ , )
for img in video
]
for video in videos
]
SCREAMING_SNAKE_CASE_ = {"pixel_values": videos}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 366 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE_ = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE_ = get_cluster_input()
return config
def a__ ( __UpperCamelCase=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser("config" , description=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Accelerate config command" , description=__UpperCamelCase )
parser.add_argument(
"--config_file" , default=__UpperCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE_ = args.config_file
else:
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(__UpperCamelCase )
else:
config.to_yaml_file(__UpperCamelCase )
print(F'''accelerate configuration saved at {config_file}''' )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = config_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
config_command(__UpperCamelCase )
if __name__ == "__main__":
main()
| 305 | 0 |
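For reference, the parser defined in the sample above is what backs the `accelerate config` subcommand. A hypothetical standalone invocation (the file path is purely illustrative):

```python
# Hypothetical programmatic use of config_command_parser() from the sample above.
parser = config_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
print(args.config_file)  # my_config.yaml
```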
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
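A worked example for `normalize_box`: coordinates are projected onto the 0-1000 grid that LayoutLM-style models expect, independently of the page's pixel size:

```python
box = [10, 20, 110, 220]  # (left, top, right, bottom) in pixels on a 1000x2000 page
print(normalize_box(box, 1000, 2000))  # -> [10, 10, 110, 110]
```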
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Union[str, Any] =["pixel_values"]
def __init__( self : List[Any] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : float = 1 / 2_55 , a : bool = True , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = True , a : Optional[str] = None , a : Optional[str] = "" , **a : Any , ):
"""simple docstring"""
super().__init__(**a )
__lowerCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
__lowerCamelCase = get_size_dict(a )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_value
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
__lowerCamelCase = apply_ocr
__lowerCamelCase = ocr_lang
__lowerCamelCase = tesseract_config
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ):
"""simple docstring"""
__lowerCamelCase = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCamelCase = (size['''height'''], size['''width'''])
return resize(a , size=a , resample=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ):
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : np.ndarray , a : Union[float, Iterable[float]] , a : Union[float, Iterable[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : int , ):
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : Optional[Any]=None , a : bool = None , a : float = None , a : bool = None , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = None , a : Optional[str] = None , a : Optional[str] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : List[Any] , ):
"""simple docstring"""
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(a )
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
__lowerCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
__lowerCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
__lowerCamelCase = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
__lowerCamelCase = []
__lowerCamelCase = []
for image in images:
__lowerCamelCase , __lowerCamelCase = apply_tesseract(a , a , a )
words_batch.append(a )
boxes_batch.append(a )
if do_resize:
__lowerCamelCase = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=a , mean=a , std=a ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(a , a ) for image in images]
__lowerCamelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=a )
if apply_ocr:
__lowerCamelCase = words_batch
__lowerCamelCase = boxes_batch
return data
| 67 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 1 |
snake_case : List[str] = "Alexander Joslin"
import operator as op
from .stack import Stack
def lowerCAmelCase_ ( _snake_case : str ) -> int:
'''simple docstring'''
__magic_name__ : Any = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
__magic_name__ : Stack[int] = Stack()
__magic_name__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_snake_case ) )
elif i in operators:
# RULE 2
operator_stack.push(_snake_case )
elif i == ")":
# RULE 4
__magic_name__ : Optional[Any] = operator_stack.peek()
operator_stack.pop()
__magic_name__ : Any = operand_stack.peek()
operand_stack.pop()
__magic_name__ : Dict = operand_stack.peek()
operand_stack.pop()
__magic_name__ : Optional[int] = operators[opr](_snake_case , _snake_case )
operand_stack.push(_snake_case )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
snake_case : List[str] = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 41 |
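The sample above depends on a local `.stack` module; below is a self-contained sketch of the same two-stack evaluation using plain Python lists as stacks (like the original, it only handles single-digit operands):

```python
import operator as op


def evaluate(equation: str) -> float:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands, pending = [], []
    for ch in equation:
        if ch.isdigit():
            operands.append(int(ch))          # RULE 1: push operand
        elif ch in operators:
            pending.append(ch)                # RULE 2: push operator
        elif ch == ")":
            # RULE 4: pop operator and two operands, push the result
            right, left = operands.pop(), operands.pop()
            operands.append(operators[pending.pop()](left, right))
    return operands[-1]                        # RULE 5


assert evaluate("(5 + ((4 * 2) * (2 + 3)))") == 45
```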
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 41 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union

if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 56 | import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
| 43 | 0 |
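A quick property check for the randomized quicksort above, comparing against Python's built-in `sorted` on random inputs (note the right bound is exclusive, so sorting the whole list is `quick_sort_random(a, 0, len(a))`):

```python
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    expected = sorted(data)
    quick_sort_random(data, 0, len(data))
    assert data == expected
```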
"""simple docstring"""
from math import pow, sqrt
def SCREAMING_SNAKE_CASE_ ( *snake_case : float )-> Tuple:
_lowerCamelCase = len(__lowerCAmelCase ) > 0 and all(value > 0.0 for value in values )
return result
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : float )-> Optional[Any]:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : float , snake_case : float )-> List[Any]:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : float , snake_case : float )-> str:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : float , snake_case : float )-> Any:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : float , snake_case : float )-> Optional[int]:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 370 |
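A hedged usage sketch for the Graham's law helpers above: helium (~4.00 g/mol) effuses about 2.83x faster than oxygen (~32.00 g/mol), since sqrt(32/4) ≈ 2.83. The exact parameter ordering follows the reconstruction above:

```python
print(effusion_ratio(4.00, 32.00))            # 2.828427  (rate_1 / rate_2 for gases 1 and 2)
print(first_effusion_rate(1.0, 4.00, 32.00))  # 2.828427  (rate of gas 1 if gas 2 effuses at 1.0)
```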
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 80 | 0 |
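Standard usage of the composite config above is to build it from two sub-configs; this mirrors the real transformers API (the encoder/decoder model choices here are illustrative):

```python
from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
print(config.encoder.model_type, config.decoder.model_type)           # vit gpt2
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True
```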
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
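To see how one `REPLACE_PATTERNS` entry rewrites a version string, here is the substitution applied to an in-memory line instead of a file:

```python
re_pattern, replace = REPLACE_PATTERNS["init"]
print(re_pattern.sub(replace.replace("VERSION", "4.31.0"), '__version__ = "4.30.0.dev0"\n'))
# prints the updated line: __version__ = "4.31.0"
```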
| 60 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
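# --- Hedged sketch (assumes only Pillow, NumPy and torch are installed): the
# hint-image preprocessing the integration test above performs -- an RGB control
# image becomes a (1, 3, H, W) float tensor in [0, 1]. The grey square is a
# stand-in for a real depth map.
import numpy as np
import torch
from PIL import Image

control_img = Image.new("RGB", (64, 64), color=(127, 127, 127))
hint = torch.from_numpy(np.array(control_img)).float() / 255.0  # (H, W, C) in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)                       # -> (1, 3, 64, 64)
print(hint.shape)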
| 229 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
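# --- Hedged sketch: the bare denoising loop these tests exercise. A zero
# tensor stands in for a real UNet's predicted noise so the example runs
# without downloading any weights.
import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)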
| 354 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
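# --- Hedged sketch: instantiating the ConvBERT config shipped with transformers
# (the class above corresponds to it) and reading back a few ConvBERT-specific
# fields. The override values are arbitrary examples.
from transformers import ConvBertConfig

cfg = ConvBertConfig(hidden_size=256, num_attention_heads=4, conv_kernel_size=9)
print(cfg.model_type, cfg.hidden_size, cfg.conv_kernel_size, cfg.head_ratio)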
| 211 | 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
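# --- Quick sanity checks for the helpers above: the primes below 10 are
# 2, 3, 5 and 7, which sum to 17.
assert is_prime(2) and is_prime(3) and is_prime(97)
assert not is_prime(1) and not is_prime(9)
assert solution(10) == 17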
| 62 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=shuffle)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
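# --- Hedged sketch of the shard-naming convention count_samples() relies on:
# the per-shard sample count is encoded in the last dash-separated field of the
# filename. The shard name below is hypothetical.
import re

fname = "wikitext-train-00001-12345.tfrecord"
print(int(re.search(r"-\d+-(\d+)\.tfrecord", fname).group(1)))  # -> 12345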
| 347 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
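# --- Hedged sketch (assumes jax and diffusers' Flax extras are installed):
# building the same tiny FlaxAutoencoderKL outside the test harness and
# initializing its parameters from a PRNG key via the init_weights helper that
# diffusers attaches to its Flax models (an assumption about the API).
import jax
from diffusers import FlaxAutoencoderKL

vae = FlaxAutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
params = vae.init_weights(jax.random.PRNGKey(0))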
| 312 | """simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
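# --- Hedged mini round-trip mirroring the tests above: only the fields listed
# in DatasetInfo._INCLUDED_INFO_IN_YAML (dataset_size among them) survive the
# YAML representation; description, for instance, does not.
from datasets.info import DatasetInfo

info = DatasetInfo(description="demo", dataset_size=42)
reloaded = DatasetInfo._from_yaml_dict(info._to_yaml_dict())
assert reloaded.dataset_size == 42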
| 312 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
lowerCAmelCase : List[str] = xmod.model.encoder.sentence_encoder
lowerCAmelCase : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCAmelCase : List[str] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCAmelCase : Optional[int] = xmod_sent_encoder.embed_positions.weight
lowerCAmelCase : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCAmelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCAmelCase : List[str] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase : str = model.roberta.encoder.layer[i]
lowerCAmelCase : Dict = xmod_sent_encoder.layers[i]
# self attention
lowerCAmelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
lowerCAmelCase : Optional[Any] = xmod_layer.self_attn.q_proj.weight
lowerCAmelCase : Dict = xmod_layer.self_attn.q_proj.bias
lowerCAmelCase : str = xmod_layer.self_attn.k_proj.weight
lowerCAmelCase : List[Any] = xmod_layer.self_attn.k_proj.bias
lowerCAmelCase : List[str] = xmod_layer.self_attn.v_proj.weight
lowerCAmelCase : int = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
lowerCAmelCase : Optional[int] = xmod_layer.self_attn.out_proj.weight
lowerCAmelCase : Dict = xmod_layer.self_attn.out_proj.bias
lowerCAmelCase : List[Any] = xmod_layer.self_attn_layer_norm.weight
lowerCAmelCase : Optional[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCAmelCase : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
lowerCAmelCase : int = xmod_layer.fca.weight
lowerCAmelCase : Union[str, Any] = xmod_layer.fca.bias
# output
lowerCAmelCase : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
lowerCAmelCase : Optional[int] = xmod_layer.fca.weight
lowerCAmelCase : str = xmod_layer.fca.bias
lowerCAmelCase : List[Any] = xmod_layer.final_layer_norm.weight
lowerCAmelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCAmelCase : Union[str, Any] = xmod_layer.adapter_layer_norm.weight
lowerCAmelCase : Any = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCAmelCase : Any = bert_output.adapter_modules[lang_code]
lowerCAmelCase : Optional[int] = xmod_layer.adapter_modules[lang_code]
lowerCAmelCase : int = from_adapter.fca.weight
lowerCAmelCase : str = from_adapter.fca.bias
lowerCAmelCase : List[Any] = from_adapter.fca.weight
lowerCAmelCase : Union[str, Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCAmelCase : Any = xmod_sent_encoder.layer_norm.weight
lowerCAmelCase : Dict = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCAmelCase : Any = xmod.model.classification_heads["mnli"].dense.weight
lowerCAmelCase : Dict = xmod.model.classification_heads["mnli"].dense.bias
lowerCAmelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight
lowerCAmelCase : int = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
lowerCAmelCase : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCAmelCase : Optional[int] = xmod.model.encoder.lm_head.dense.bias
lowerCAmelCase : List[str] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase : List[Any] = xmod.model.encoder.lm_head.weight
lowerCAmelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase : List[str] = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
lowerCAmelCase : Optional[Any] = xmod.model.classification_heads["mnli"](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
lowerCAmelCase : Optional[int] = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCAmelCase : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCAmelCase : Union[str, Any] = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
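# --- Hedged sketch of the equivalence check the converter ends with: compare
# two tensors elementwise and accept differences below an absolute tolerance.
import torch

ours = torch.randn(1, 4)
theirs = ours + 1e-4                               # simulate tiny numerical drift
print(torch.max(torch.abs(ours - theirs)).item())  # max absolute difference
print(torch.allclose(ours, theirs, atol=1e-3))     # True: within tolerance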
| 108 |
"""simple docstring"""
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
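# --- Worked example: 25,000 borrowed at 8% a year over 3 years comes to
# roughly 783.4 per month (A = P * r * (1 + r)^n / ((1 + r)^n - 1) with
# r = 0.08 / 12 and n = 36).
print(f"{equated_monthly_installments(25000, 0.08, 3):.2f}")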
| 108 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
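# --- Hedged sketch: with _LazyModule installed in sys.modules, importing the
# package is cheap; the torch-backed symbols are resolved only on first
# attribute access (assumes transformers is installed).
import importlib

trocr = importlib.import_module("transformers.models.trocr")
print(hasattr(trocr, "TrOCRProcessor"))  # attribute access triggers the lazy import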
| 368 | from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float):
    """
    Solve for the missing one of shear stress, tangential force, or area:
    exactly one of the three arguments must be supplied as 0.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
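# --- Worked example: with a tangential force of 140 N over 0.02 m^2 the
# function solves for the missing quantity (stress = F / A = 7000 Pa), and the
# inverse call recovers the force.
print(shear_stress(stress=0, tangential_force=140, area=0.02))   # ('stress', 7000.0)
print(shear_stress(stress=7000, tangential_force=0, area=0.02))  # ('tangential_force', 140.0)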
| 105 | 0 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __magic_name__ ( self : Optional[Any] , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Any =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max(len(tokenizer.encode(a_ ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE__ : Tuple =max(len(tokenizer.encode(a_ ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE__ : Any =4
SCREAMING_SNAKE_CASE__ : Optional[int] =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
SCREAMING_SNAKE_CASE__ : Dict =SeqaSeqDataset(
a_ , data_dir=a_ , type_path='''train''' , max_source_length=a_ , max_target_length=a_ , src_lang=a_ , tgt_lang=a_ , )
SCREAMING_SNAKE_CASE__ : Tuple =DataLoader(a_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(a_ , a_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
SCREAMING_SNAKE_CASE__ : Any =shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __magic_name__ ( self : Union[str, Any] , __lowercase : int ) -> str:
SCREAMING_SNAKE_CASE__ : Any =AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : List[str] =max(len(tokenizer.encode(a_ ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE__ : Tuple =max(len(tokenizer.encode(a_ ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE__ : Tuple =4
SCREAMING_SNAKE_CASE__ : List[Any] =LegacySeqaSeqDataset(
a_ , data_dir=a_ , type_path='''train''' , max_source_length=20 , max_target_length=a_ , )
SCREAMING_SNAKE_CASE__ : int =DataLoader(a_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __magic_name__ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
SCREAMING_SNAKE_CASE__ : List[str] =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
SCREAMING_SNAKE_CASE__ : Dict =tmp_dir.joinpath('''train.source''' ).open().readlines()
SCREAMING_SNAKE_CASE__ : Optional[Any] =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(a_ , a_ , 1_28 , a_ )
SCREAMING_SNAKE_CASE__ : List[str] ={x.name for x in tmp_dir.iterdir()}
SCREAMING_SNAKE_CASE__ : Tuple ={x.name for x in save_dir.iterdir()}
SCREAMING_SNAKE_CASE__ : List[str] =save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(a_ ) < len(a_ )
assert len(a_ ) == 1
assert len(packed_examples[0] ) == sum(len(a_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def __magic_name__ ( self : Optional[Any] ) -> int:
if not FAIRSEQ_AVAILABLE:
return
SCREAMING_SNAKE_CASE__ : str =self._get_dataset(max_len=64 )
SCREAMING_SNAKE_CASE__ : Any =64
SCREAMING_SNAKE_CASE__ : List[str] =ds.make_dynamic_sampler(a_ , required_batch_size_multiple=a_ )
SCREAMING_SNAKE_CASE__ : Dict =[len(a_ ) for x in batch_sampler]
assert len(set(a_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(a_ ) == len(a_ ) # no dropped or added examples
SCREAMING_SNAKE_CASE__ : Union[str, Any] =DataLoader(a_ , batch_sampler=a_ , collate_fn=ds.collate_fn , num_workers=2 )
SCREAMING_SNAKE_CASE__ : List[str] =[]
SCREAMING_SNAKE_CASE__ : List[Any] =[]
for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
SCREAMING_SNAKE_CASE__ : Tuple =np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(a_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(a_ )
assert num_src_per_batch[0] == max(a_ )
if failures:
raise AssertionError(F"too many tokens in {len(a_ )} batches" )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =self._get_dataset(max_len=5_12 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =2
SCREAMING_SNAKE_CASE__ : Optional[Any] =ds.make_sortish_sampler(a_ , shuffle=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] =DataLoader(a_ , batch_size=a_ , collate_fn=ds.collate_fn , num_workers=2 )
SCREAMING_SNAKE_CASE__ : str =DataLoader(a_ , batch_size=a_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.pad_token_id
def count_pad_tokens(__lowercase : str , __lowercase : str="input_ids" ):
return [batch[k].eq(a_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(a_ , k='''labels''' ) ) < sum(count_pad_tokens(a_ , k='''labels''' ) )
assert sum(count_pad_tokens(a_ ) ) < sum(count_pad_tokens(a_ ) )
assert len(a_ ) == len(a_ )
    def _get_dataset(self, n_obs=10_00, max_len=1_28):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 2_56, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 2_56, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0 | 152 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
| 215 | 0 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci terms 1, 2, 3, 5, 8, ... indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_0_0_0) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
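    # Added sanity check (illustrative, not in the original file): 144 is the
    # first Fibonacci term with three digits and the 12th term overall, so
    # solution(3) must return 12; solution(1_000) is Project Euler problem 25.
    assert solution(3) == 12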
| 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
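# Hypothetical usage sketch (added for illustration, not part of the original
# file): PipelineTool subclasses are callable, so the encode -> forward ->
# decode chain above can be exercised end to end roughly like this, assuming
# the checkpoint downloads and an input image exists at the given path.
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")
#   mask.save("cat_mask.png")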
| 1 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
    def create_and_check_model_fp16_forward(self, config, input_dict, ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), F'{tmpdirname}/t5_test.onnx', export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def __lowercase ( self : Union[str, Any] ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 120 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = WavaVecaFeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json")
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F'{USER}/test-dynamic-feature-extractor', trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 120 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=1_0, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
        assert images.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=2_0, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
        assert images.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 144 |
"""simple docstring"""
import re
from filelock import FileLock

try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Rejoin the sentences of `x` with newlines, as expected by rougeLsum."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 144 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
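# Note (added for illustration): with this standard transformers init pattern,
# replacing the module object in sys.modules with a _LazyModule means the heavy
# torch/TF submodules listed in _import_structure are only imported on first
# attribute access, keeping `import transformers` fast.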
| 273 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
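# Assumed input format (illustrative): one label per line, with the first
# whitespace-separated token used as the label and the line number as the id,
# e.g. a file containing "happy\nsad\n" maps to {0: "happy", 1: "sad"}.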
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
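# Example invocation (added for illustration; all paths are placeholders):
#   python <this script> \
#       --checkpoint_path /path/to/fairseq/wav2vec_small.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path /path/to/output_dir
# Pass --not_finetuned for a pretraining-only checkpoint (no CTC head), or
# --is_seq_class when the checkpoint is a sequence-classification fine-tune.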
| 349 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 2_0,
            'do_center_crop': True,
            'crop_size': 1_8,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 254 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Tuple = """facebook/bart-large-mnli"""
__magic_name__ :Any = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
__magic_name__ :Optional[int] = """text_classifier"""
__magic_name__ :List[Any] = AutoTokenizer
__magic_name__ :str = AutoModelForSequenceClassification
__magic_name__ :int = ["""text""", ["""text"""]]
__magic_name__ :int = ["""text"""]
def snake_case ( self ):
'''simple docstring'''
super().setup()
lowerCAmelCase__ :Any = self.model.config
lowerCAmelCase__ :Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
lowerCAmelCase__ :Optional[Any] = int(__UpperCAmelCase )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = labels
return self.pre_processor(
[text] * len(__UpperCAmelCase ) , [F"This example is {label}" for label in labels] , return_tensors='pt' , padding='max_length' , )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = outputs.logits
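        # select the candidate label whose entailment logit is highest (index computed in setup)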
        lowerCAmelCase__ :int = torch.argmax(logits[:, self.entailment_id] ).item()
return self._labels[label_id]
| 254 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class A :
def __init__( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : Optional[Any] = value
A : Node | None = None
A : Node | None = None
class A :
def __init__( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : str = tree
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if node is None:
return 0
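        # otherwise, this node's value plus the recursive sums of both subtrees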
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
import qiskit
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> qiskit.result.counts.Counts:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase : List[str] =qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the Aer simulator
UpperCAmelCase : Dict =qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=10_00 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(__lowerCAmelCase )
if __name__ == "__main__":
__snake_case = half_adder(1, 1)
print(f'Half Adder Output Qubit Counts: {counts}')
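    # For inputs (1, 1) every shot should yield '10': carry bit 1, sum bit 0 (classical bit 1 is printed first).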
| 348 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case :List[str] = logging.get_logger(__name__)
__snake_case :str = '''▁'''
__snake_case :Dict = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
__snake_case :Union[str, Any] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__snake_case :str = {'''vinai/bartpho-syllable''': 1024}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : int = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : Any="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="<s>" , __SCREAMING_SNAKE_CASE : List[str]="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__a = vocab_file
__a = monolingual_vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE))
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__a = {}
__a = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__SCREAMING_SNAKE_CASE) not in self.fairseq_tokens_to_ids:
__a = cnt
cnt += 1
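        # extend the reduced vocabulary with entries from the monolingual dict.txt file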
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''') as f:
for line in f.readlines():
__a = line.strip().split()[0]
__a = len(self.fairseq_tokens_to_ids)
if str(__SCREAMING_SNAKE_CASE) not in self.fairseq_tokens_to_ids:
__a = len(self.fairseq_tokens_to_ids)
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[int]):
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
__a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE)
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = ''''''.join(__SCREAMING_SNAKE_CASE).replace(__SCREAMING_SNAKE_CASE , ''' ''').strip()
return out_string
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE)
elif not os.path.isfile(self.vocab_file):
with open(__SCREAMING_SNAKE_CASE , '''wb''') as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
__SCREAMING_SNAKE_CASE) and os.path.isfile(self.monolingual_vocab_file):
copyfile(self.monolingual_vocab_file , __SCREAMING_SNAKE_CASE)
elif not os.path.isfile(self.monolingual_vocab_file):
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(__SCREAMING_SNAKE_CASE)} \n')
return out_vocab_file, out_monolingual_vocab_file
| 131 |
def __snake_case ( _UpperCAmelCase ):
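    # Returns the 1-based position of the most significant set bit (i.e. the bit length of the input).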
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('''Input value must be an \'int\' type''' )
__a = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131 | 1 |
from __future__ import annotations
from collections.abc import Callable
__lowerCAmelCase = list[list[float | int]]
def snake_case_ ( snake_case , snake_case ) -> Matrix:
lowercase__: int = len(snake_case )
lowercase__: Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case )]
lowercase__: int
lowercase__: int
lowercase__: int
lowercase__: int
lowercase__: int
lowercase__: float
for row in range(snake_case ):
for col in range(snake_case ):
lowercase__: List[Any] = matrix[row][col]
lowercase__: Optional[int] = vector[row][0]
lowercase__: str = 0
lowercase__: Any = 0
while row < size and col < size:
# pivoting
        lowercase__: List[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case , snake_case ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowercase__ , lowercase__: Optional[int] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , snake_case ):
lowercase__: Any = augmented[rowa][col] / augmented[row][col]
lowercase__: int = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , snake_case ):
for row in range(snake_case ):
lowercase__: Union[str, Any] = augmented[row][col] / augmented[col][col]
for cola in range(snake_case , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(snake_case )
]
def snake_case_ ( snake_case ) -> Callable[[int], int]:
lowercase__: int = len(snake_case )
lowercase__: Matrix = [[0 for _ in range(snake_case )] for _ in range(snake_case )]
lowercase__: Matrix = [[0] for _ in range(snake_case )]
lowercase__: Matrix
lowercase__: int
lowercase__: int
lowercase__: int
for x_val, y_val in enumerate(snake_case ):
for col in range(snake_case ):
lowercase__: List[str] = (x_val + 1) ** (size - col - 1)
lowercase__: str = y_val
lowercase__: Optional[int] = solve(snake_case , snake_case )
def interpolated_func(snake_case ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case ) )
return interpolated_func
def snake_case_ ( snake_case ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def snake_case_ ( snake_case = question_function , snake_case = 10 ) -> int:
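    # For each prefix of the true sequence, fit the optimum polynomial and add
    # its value at the first point where it disagrees with the generating
    # function (the "first incorrect term").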
lowercase__: list[int] = [func(snake_case ) for x_val in range(1 , order + 1 )]
lowercase__: list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowercase__: int = 0
lowercase__: Callable[[int], int]
lowercase__: int
for poly in polynomials:
lowercase__: List[str] = 1
while func(snake_case ) == poly(snake_case ):
x_val += 1
ret += poly(snake_case )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 196 |
def snake_case_ ( snake_case , snake_case ) -> str:
if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be non-negative integers' )
lowercase__: str = str(bin(snake_case ) )
binary_number += "0" * shift_amount
return binary_number
def snake_case_ ( snake_case , snake_case ) -> str:
if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be non-negative integers' )
lowercase__: Optional[Any] = str(bin(snake_case ) )[2:]
if shift_amount >= len(snake_case ):
return "0b0"
lowercase__: Optional[int] = binary_number[: len(snake_case ) - shift_amount]
return "0b" + shifted_binary_number
def snake_case_ ( snake_case , snake_case ) -> str:
if number >= 0: # Get binary representation of positive number
lowercase__: Union[str, Any] = '0' + str(bin(snake_case ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase__: Dict = len(bin(snake_case )[3:] ) # Find 2's complement of number
lowercase__: int = bin(abs(snake_case ) - (1 << binary_number_length) )[3:]
lowercase__: Any = (
'1' + '0' * (binary_number_length - len(snake_case )) + binary_number
)
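    # an arithmetic right shift replicates the sign bit into the vacated high-order positions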
if shift_amount >= len(snake_case ):
return "0b" + binary_number[0] * len(snake_case )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(snake_case ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 1 |
from __future__ import annotations
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = r"\w+[.]\d+"
lowercase__ = re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for pat in pats:
lowercase__ = key.replace(SCREAMING_SNAKE_CASE_ , "_".join(pat.split("." ) ) )
return key
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
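        # PyTorch conv kernels are (out_ch, in_ch, h, w); Flax expects (h, w, in_ch, out_ch)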
lowercase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowercase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=42 ):
# Step 1: Convert pytorch tensor to numpy
lowercase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase__ = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE_ ) )
lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE_ )
lowercase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ = rename_key(SCREAMING_SNAKE_CASE_ )
lowercase__ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowercase__ , lowercase__ = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowercase__ = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
| 224 | 1 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict ) -> Any:
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return (-y * np.log(__magic_name__ ) - (1 - y) * np.log(1 - h )).mean()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase :Tuple = np.dot(__magic_name__ , __magic_name__ )
return np.sum(y * scores - np.log(1 + np.exp(__magic_name__ ) ) )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=7_0000 ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase :Tuple = np.zeros(x.shape[1] )
for iterations in range(__magic_name__ ):
UpperCamelCase :str = np.dot(__magic_name__ , __magic_name__ )
UpperCamelCase :str = sigmoid_function(__magic_name__ )
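        # gradient of the log-loss averaged over the training examples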
UpperCamelCase :int = np.dot(x.T , h - y ) / y.size
UpperCamelCase :int = theta - alpha * gradient # updating the weights
UpperCamelCase :List[str] = np.dot(__magic_name__ , __magic_name__ )
UpperCamelCase :str = sigmoid_function(__magic_name__ )
UpperCamelCase :List[Any] = cost_function(__magic_name__ , __magic_name__ )
if iterations % 100 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
UpperCAmelCase_ : int = datasets.load_iris()
UpperCAmelCase_ : Dict = iris.data[:, :2]
UpperCAmelCase_ : Dict = (iris.target != 0) * 1
UpperCAmelCase_ : Optional[int] = 0.1
UpperCAmelCase_ : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
return sigmoid_function(
np.dot(__magic_name__ , __magic_name__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = (x[:, 1].min(), x[:, 1].max())
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCAmelCase_ : List[Any] = np.c_[xxa.ravel(), xxa.ravel()]
UpperCAmelCase_ : Tuple = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 38 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_snake_case = vocab_size
_snake_case = d_embed
_snake_case = d_proj
_snake_case = cutoffs + [vocab_size]
_snake_case = [0] + self.cutoffs
_snake_case = div_val
_snake_case = self.cutoffs[0]
_snake_case = len(self.cutoffs ) - 1
_snake_case = self.shortlist_size + self.n_clusters
_snake_case = keep_order
_snake_case = []
_snake_case = []
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.n_clusters > 0:
_snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_weight' )
_snake_case = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case = self.d_embed // (self.div_val**i)
_snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' )
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
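        # optionally project the hidden states, then score them against the output embedding weights and bias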
_snake_case = x
if proj is not None:
_snake_case = tf.einsum('ibd,ed->ibe' , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum('ibd,nd->ibn' , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = tf.range(lp_size[0] , dtype=target.dtype )
_snake_case = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = 0
if self.n_clusters == 0:
_snake_case = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = []
_snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_snake_case = (target >= l_idx) & (target < r_idx)
_snake_case = tf.where(lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_snake_case = self.out_layers[0][0][l_idx:r_idx]
_snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
_snake_case = self.out_layers[i][0]
_snake_case = self.out_layers[i][1]
if i == 0:
_snake_case = tf.concat([cur_W, self.cluster_weight] , 0 )
_snake_case = tf.concat([cur_b, self.cluster_bias] , 0 )
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
_snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
_snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_snake_case = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_snake_case = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 42 | 0 |
lowercase__ :Optional[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Every chain eventually ends in one of two cycles.
# One cycle contains 89; declaring its member 58 first means
# all remaining members need the fewest iterations to be checked.
# The other chain ends with 1 and has only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed the dictionary to an array to speed up the solution.
lowercase__ :list[bool | None] = [None] * 1000_0000
lowercase__ :Optional[Any] = True
lowercase__ :str = False
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase = chain(next_number(lowerCAmelCase__ ) )
lowercase = number_chain
while number < 1000_0000:
lowercase = number_chain
number *= 10
return number_chain
def UpperCamelCase ( lowerCAmelCase__ = 1000_0000 ):
'''simple docstring'''
for i in range(1 , lowerCAmelCase__ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 97 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Dict = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowercase__ :Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 | 1 |
'''simple docstring'''
import numpy as np
import datasets
SCREAMING_SNAKE_CASE_: List[str] ='\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
SCREAMING_SNAKE_CASE_: int ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _lowercase (self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowercase (self : str , __a : Optional[Any] , __a : List[Any] ):
# convert to numpy arrays
UpperCAmelCase_ = np.array(UpperCamelCase__ )
UpperCAmelCase_ = np.array(UpperCamelCase__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
UpperCAmelCase_ = X - np.mean(UpperCamelCase__ )
UpperCAmelCase_ = np.cov(reference_distribution.T )
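        # invert the covariance matrix, falling back to the pseudo-inverse when it is singular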
try:
UpperCAmelCase_ = np.linalg.inv(UpperCamelCase__ )
except np.linalg.LinAlgError:
UpperCAmelCase_ = np.linalg.pinv(UpperCamelCase__ )
UpperCAmelCase_ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = np.dot(UpperCamelCase__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
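            # sentencepiece can glue a trailing comma onto digits; re-tokenize such pieces so the comma stands alone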
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 48 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
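        # PNDM first runs Runge-Kutta (PRK) warm-up steps, then linear multistep (PLMS) steps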
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        # earlier version of set_timesteps() caused an error indexing alphas with inference steps as a power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
| 23 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ):
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    size = int((256 / 224) * config.image_size)
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
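# A self-contained sketch of the fused-qkv split performed by read_in_q_k_v above.
# The hidden size is a toy assumption (DeiT-tiny actually uses 192), not a value from a real checkpoint.
import torch

toy_hidden_size = 4
in_proj_weight = torch.randn(3 * toy_hidden_size, toy_hidden_size)  # timm stores [q; k; v] fused
query = in_proj_weight[:toy_hidden_size, :]
key = in_proj_weight[toy_hidden_size : 2 * toy_hidden_size, :]
value = in_proj_weight[-toy_hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)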
| 23 | 1 |
"""simple docstring"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """simple docstring"""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
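# A usage sketch for mincost_tickets above; the inputs are the classic example for this
# problem, and the expected total follows from one 7-day pass plus two single-day tickets.
days = [1, 4, 6, 7, 8, 20]
costs = [2, 7, 15]  # prices of a 1-day, 7-day and 30-day pass
print(mincost_tickets(days, costs))  # 11 = 7 (pass covering days 1-7) + 2 (day 8) + 2 (day 20)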
| 224 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    """simple docstring"""

    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
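# A quick check of the column mapping, assuming the de-obfuscated class name used above.
template = Summarization(text_column="article", summary_column="highlights")
assert template.column_mapping == {"article": "text", "highlights": "summary"}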
| 224 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
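# The early-exit decision in DeeBERT-style models compares the entropy of an exit head's
# logits to a threshold; a minimal standalone sketch of that criterion (the threshold
# value here is an arbitrary assumption, not one used by the class above).
import torch

def logits_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

confident_logits = torch.tensor([[4.0, -2.0, -2.0]])
if logits_entropy(confident_logits).item() < 0.3:  # assumed threshold
    print("entropy low enough: exit at this layer")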
| 367 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    """simple docstring"""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """simple docstring"""
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
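# The PATTERNS table above is applied as ordered substring replacements; a reduced
# standalone demonstration on a made-up TF-style variable name (the key is hypothetical).
demo_patterns = [["memory_attention", "encoder_attn"], ["/", "."], ["output_proj", "out_proj"], ["kernel", "weight"]]
k = "encoder/memory_attention/output_proj/kernel"  # hypothetical TF name
for old, new in demo_patterns:
    k = k.replace(old, new)
print(k)  # encoder.encoder_attn.out_proj.weight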
| 202 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def UpperCAmelCase__ ( self : List[str]):
super().setUp()
lowerCAmelCase_ : str = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
lowerCAmelCase_ : int = dict(zip(A_ , range(len(A_))))
lowerCAmelCase_ : Any = {'''unk_token''': '''<unk>'''}
lowerCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''])
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''') as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCAmelCase_ : List[Any] = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase__ ( self : Any , **A_ : Union[str, Any]):
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **A_)
def UpperCAmelCase__ ( self : Optional[int] , A_ : Optional[Any]):
lowerCAmelCase_ : Union[str, Any] = '''This is a là test'''
lowerCAmelCase_ : Union[str, Any] = '''This is a<unk><unk> test'''
return input_text, output_text
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Any = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCAmelCase_ : Any = '''This is a là test'''
lowerCAmelCase_ : Optional[int] = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
lowerCAmelCase_ : int = tokenizer.tokenize(A_)
self.assertListEqual(A_ , A_)
lowerCAmelCase_ : Optional[int] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_) , A_)
| 103 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
lowerCAmelCase_ : List[str] = SwinvaConfig()
lowerCAmelCase_ : List[str] = swinva_name.split('''_''' )
lowerCAmelCase_ : str = name_split[1]
if "to" in name_split[3]:
lowerCAmelCase_ : List[Any] = int(name_split[3][-3:] )
else:
lowerCAmelCase_ : List[Any] = int(name_split[3] )
if "to" in name_split[2]:
lowerCAmelCase_ : List[str] = int(name_split[2][-2:] )
else:
lowerCAmelCase_ : int = int(name_split[2][6:] )
if model_size == "tiny":
lowerCAmelCase_ : Any = 96
lowerCAmelCase_ : List[str] = (2, 2, 6, 2)
lowerCAmelCase_ : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "small":
lowerCAmelCase_ : List[str] = 96
lowerCAmelCase_ : Any = (2, 2, 18, 2)
lowerCAmelCase_ : Dict = (3, 6, 12, 24)
elif model_size == "base":
lowerCAmelCase_ : Union[str, Any] = 128
lowerCAmelCase_ : List[Any] = (2, 2, 18, 2)
lowerCAmelCase_ : Tuple = (4, 8, 16, 32)
else:
lowerCAmelCase_ : Optional[Any] = 192
lowerCAmelCase_ : List[Any] = (2, 2, 18, 2)
lowerCAmelCase_ : List[Any] = (6, 12, 24, 48)
if "to" in swinva_name:
lowerCAmelCase_ : Union[str, Any] = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
lowerCAmelCase_ : Optional[int] = 21841
lowerCAmelCase_ : Any = '''huggingface/label-files'''
lowerCAmelCase_ : Tuple = '''imagenet-22k-id2label.json'''
lowerCAmelCase_ : Any = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCAmelCase_ : Optional[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase_ : str = idalabel
lowerCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
else:
lowerCAmelCase_ : Optional[int] = 1000
lowerCAmelCase_ : Tuple = '''huggingface/label-files'''
lowerCAmelCase_ : Union[str, Any] = '''imagenet-1k-id2label.json'''
lowerCAmelCase_ : Dict = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCAmelCase_ : int = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase_ : List[str] = idalabel
lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = img_size
lowerCAmelCase_ : Dict = num_classes
lowerCAmelCase_ : Dict = embed_dim
lowerCAmelCase_ : Optional[Any] = depths
lowerCAmelCase_ : Optional[int] = num_heads
lowerCAmelCase_ : Dict = window_size
return config
def rename_key(name):
if "patch_embed.proj" in name:
lowerCAmelCase_ : Dict = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCAmelCase_ : List[Any] = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCAmelCase_ : int = '''encoder.''' + name
if "attn.proj" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase_ : Tuple = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase_ : Tuple = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "q_bias" in name:
lowerCAmelCase_ : Tuple = name.replace('''q_bias''' ,'''query.bias''' )
if "k_bias" in name:
lowerCAmelCase_ : Tuple = name.replace('''k_bias''' ,'''key.bias''' )
if "v_bias" in name:
lowerCAmelCase_ : int = name.replace('''v_bias''' ,'''value.bias''' )
if "cpb_mlp" in name:
lowerCAmelCase_ : Any = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' )
if name == "norm.weight":
lowerCAmelCase_ : Dict = '''layernorm.weight'''
if name == "norm.bias":
lowerCAmelCase_ : Any = '''layernorm.bias'''
if "head" in name:
lowerCAmelCase_ : int = name.replace('''head''' ,'''classifier''' )
else:
lowerCAmelCase_ : Union[str, Any] = '''swinv2.''' + name
return name
def convert_state_dict(orig_state_dict, model):
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ : Optional[int] = orig_state_dict.pop(__UpperCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
lowerCAmelCase_ : Dict = key.split('''.''' )
lowerCAmelCase_ : Any = int(key_split[1] )
lowerCAmelCase_ : Optional[int] = int(key_split[3] )
lowerCAmelCase_ : Dict = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase_ : Optional[Any] = val[:dim, :]
lowerCAmelCase_ : Any = val[dim : dim * 2, :]
lowerCAmelCase_ : List[Any] = val[-dim:, :]
else:
lowerCAmelCase_ : Dict = val[:dim]
lowerCAmelCase_ : Union[str, Any] = val[
dim : dim * 2
]
lowerCAmelCase_ : Dict = val[-dim:]
else:
lowerCAmelCase_ : Optional[Any] = val
return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
lowerCAmelCase_ : Optional[Any] = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
lowerCAmelCase_ : List[str] = get_swinva_config(__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = SwinvaForImageClassification(__UpperCamelCase )
model.eval()
lowerCAmelCase_ : str = convert_state_dict(timm_model.state_dict() ,__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
lowerCAmelCase_ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' ,'''-''' ) ) )
lowerCAmelCase_ : Union[str, Any] = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
lowerCAmelCase_ : Optional[Any] = image_processor(images=__UpperCamelCase ,return_tensors='''pt''' )
lowerCAmelCase_ : List[str] = timm_model(inputs['''pixel_values'''] )
lowerCAmelCase_ : Union[str, Any] = model(**__UpperCamelCase ).logits
assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 )
print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase ,__UpperCamelCase ) ,organization='''nandwalritik''' ,commit_message='''Add model''' ,)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
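# The size branches in get_swinva_config amount to a lookup table; the same information
# as data, with values copied from the branches above (easier to scan at a glance).
SWINV2_VARIANTS = {  # name: (embed_dim, depths, num_heads)
    "tiny": (96, (2, 2, 6, 2), (3, 6, 12, 24)),
    "small": (96, (2, 2, 18, 2), (3, 6, 12, 24)),
    "base": (128, (2, 2, 18, 2), (4, 8, 16, 32)),
    "large": (192, (2, 2, 18, 2), (6, 12, 24, 48)),
}
embed_dim, depths, num_heads = SWINV2_VARIANTS["base"]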
| 103 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
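# A stripped-down illustration of the lazy-module mechanism used above; this is not the
# real transformers._LazyModule, just the core idea: resolve the submodule on first
# attribute access instead of at import time.
import importlib

class TinyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # invert {module: [attrs]} into {attr: module} for O(1) lookups
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self._package)
        return getattr(submodule, attr)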
| 180 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowerCAmelCase_ ( self , **A ) -> Tuple:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **A )
def lowerCAmelCase_ ( self , **A ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **A )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
a = "永和服装饰品有限公司,今天天气非常好"
a = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = self.get_tokenizer()
a , a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = self.get_rust_tokenizer()
a , a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 180 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(self , a_=12_81_12 , a_=10_24 , a_=12 , a_=40_96 , a_=16 , a_=12 , a_=40_96 , a_=16 , a_=0.05 , a_=0.05 , a_=True , a_=True , a_="relu" , a_=10_24 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.02 , a_=2 , a_=True , a_=False , a_="float32" , a_=False , a_=1_28 , a_=64 , a_=4 , a_=4 , a_=0.001 , a_=0.001 , a_="all" , a_=False , a_=False , a_=1.0 , a_=0.2 , a_=1 , a_=0 , a_=2 , a_=False , **a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = vocab_size
__snake_case : int = max_position_embeddings
__snake_case : int = d_model
__snake_case : List[str] = encoder_ffn_dim
__snake_case : Optional[Any] = encoder_layers
__snake_case : Dict = encoder_attention_heads
__snake_case : str = decoder_ffn_dim
__snake_case : Dict = decoder_layers
__snake_case : Union[str, Any] = decoder_attention_heads
__snake_case : Optional[Any] = dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : Union[str, Any] = activation_dropout
__snake_case : List[Any] = activation_function
__snake_case : int = init_std
__snake_case : List[Any] = encoder_layerdrop
__snake_case : Optional[Any] = decoder_layerdrop
__snake_case : Tuple = use_cache
__snake_case : Dict = encoder_layers
__snake_case : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
__snake_case : List[str] = router_z_loss_coef
__snake_case : int = router_aux_loss_coef
__snake_case : Union[str, Any] = decoder_sparse_step
__snake_case : Tuple = encoder_sparse_step
__snake_case : int = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : int = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
__snake_case : Optional[int] = router_dtype
__snake_case : Tuple = router_ignore_padding_tokens
__snake_case : List[Any] = batch_prioritized_routing
__snake_case : Optional[int] = second_expert_policy
__snake_case : Any = normalize_router_prob_before_dropping
__snake_case : List[Any] = moe_eval_capacity_token_fraction
__snake_case : Optional[int] = moe_token_dropout
__snake_case : int = output_router_logits
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , **a_ , )
| 102 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : Any = input.size()
lowerCamelCase : List[Any] = len(lowerCamelCase )
lowerCamelCase : Optional[Any] = shape[dimension]
lowerCamelCase : Optional[int] = torch.arange(0, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = torch.div(sizedim - size, lowerCamelCase, rounding_mode="""floor""" ) + 1
lowerCamelCase : int = torch.arange(lowerCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase : str = [slice(lowerCamelCase )] * rank
lowerCamelCase : List[str] = indices
lowerCamelCase : Dict = input[s]
lowerCamelCase : Any = list(range(0, rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : List[Any] = torch.arange(1, lowerCamelCase )
lowerCamelCase : Optional[int] = torch.remainder(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = remainders == 0
lowerCamelCase : List[Any] = candidates[divisor_indices]
lowerCamelCase : Optional[Any] = torch.max(lowerCamelCase )
return largest_divisor, torch.div(lowerCamelCase, lowerCamelCase, rounding_mode="""floor""" )
class GPTNeoOnnxConfig(OnnxConfigWithPast):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[int] = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
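# How the default attention_types expands via expand_attention_types_params above: each
# [pattern, count] pair repeats its pattern count times, yielding one entry per layer.
attention_types = [[["global", "local"], 12]]
attentions = []
for item in attention_types:
    for _ in range(item[1]):
        attentions.extend(item[0])
print(len(attentions), attentions[:4])  # 24 ['global', 'local', 'global', 'local']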
| 287 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = 'biogpt'

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 358 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 100 | 0 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
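# A worked call of carrier_concentration above: passing conductivity=0 asks the function
# to solve sigma = q * n * mu for it (the concentration and mobility values are illustrative).
name, value = carrier_concentration(conductivity=0, electron_conc=1e19, mobility=0.12)
print(name, value)  # conductivity 0.1922... since 1.6021e-19 * 1e19 * 0.12 = 0.192252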
| 92 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
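# Sanity check of the closed form S = n/2 * (2a + (n - 1)d) against a brute-force sum.
a, d, n = 1, 1, 10
assert sum_of_series(a, d, n) == sum(a + i * d for i in range(n))  # both give 55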
| 21 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
__UpperCamelCase = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """simple docstring"""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            """ function.""")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'''val_{metric}''', mode="""max""", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    """simple docstring"""
    return EarlyStopping(
        monitor=f'''val_{metric}''', mode="""min""" if """loss""" in metric else """max""", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase__ )
@rank_zero_only
def lowercase__ ( self : Dict , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule , __magic_name__ : str , __magic_name__ : Tuple=True ) -> None:
"""simple docstring"""
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
__snake_case : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
__snake_case : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
__snake_case : Union[str, Any] = od / """test_results.txt"""
__snake_case : Union[str, Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__snake_case : Optional[int] = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
__snake_case : Any = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=UpperCamelCase__ )
generations_file.parent.mkdir(exist_ok=UpperCamelCase__ )
with open(UpperCamelCase__ , """a+""" ) as writer:
for key in sorted(UpperCamelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
__snake_case : Optional[int] = metrics[key]
if isinstance(UpperCamelCase__ , torch.Tensor ):
__snake_case : List[Any] = val.item()
__snake_case : Optional[int] = f'''{key}: {val:.6f}\n'''
writer.write(UpperCamelCase__ )
if not save_generations:
return
if "preds" in metrics:
__snake_case : Optional[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(UpperCamelCase__ )
@rank_zero_only
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
try:
__snake_case : int = pl_module.model.model.num_parameters()
except AttributeError:
__snake_case : Optional[Any] = pl_module.model.num_parameters()
__snake_case : Optional[Any] = count_trainable_parameters(UpperCamelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def lowercase__ ( self : Dict , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule ) -> Union[str, Any]:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase__ , UpperCamelCase__ , """test""" )
@rank_zero_only
def lowercase__ ( self : Optional[Any] , __magic_name__ : pl.Trainer , __magic_name__ : Any ) -> Any:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
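# How these pieces are typically wired together; the metric, patience and output
# directory below are illustrative assumptions, not values from the module above.
checkpoint_cb = get_checkpoint_callback("outputs", metric="rouge2")
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()], max_epochs=1)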
| 368 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCamelCase = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
super().__init__(
__magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , )
__snake_case : List[str] = None
def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]:
"""simple docstring"""
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__snake_case : List[Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case : List[str] = str(distributed_port + 1 )
__snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ )
dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group )
return target_tensor
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ )
return ifname
def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]:
"""simple docstring"""
if not dist.is_initialized():
__snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ )
# distributed training
__snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
__snake_case : Tuple = None
if self._is_main():
__snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )]
dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group )
# scatter logic
__snake_case : Optional[int] = question_hidden_states.shape[0]
__snake_case : Optional[Any] = []
__snake_case : Any = []
if self._is_main():
assert len(__magic_name__ ) == world_size
__snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ )
__snake_case , __snake_case : Tuple = torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa )
__snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
| 13 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 67 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
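# A toy subclass showing the contract above; the subcommand name is made up, and in the
# transformers CLI the `parser` passed in is an argparse sub-parsers object.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")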
| 249 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = PandasConfig
def _lowercase ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCamelCase : Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
UpperCamelCase : List[Any] = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCamelCase : List[Any] = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
UpperCamelCase : Dict = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCamelCase : Dict = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) )
return splits
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase : Optional[Any] = table_cast(__SCREAMING_SNAKE_CASE , self.config.features.arrow_schema )
return pa_table
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f:
UpperCamelCase : List[Any] = pa.Table.from_pandas(pd.read_pickle(__SCREAMING_SNAKE_CASE ) )
yield i, self._cast_table(__SCREAMING_SNAKE_CASE )
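# The core pandas-to-Arrow conversion used in _generate_tables above, shown standalone;
# the DataFrame is made up for illustration (a real run would come from pd.read_pickle).
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
pa_table = pa.Table.from_pandas(df)
print(pa_table.schema)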
| 315 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        images = kwargs.pop('''images''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
        """simple docstring"""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R'''<s_(.*?)>''' , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(Rf"""</s_{key}>""" , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , '''''' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'''<sep/>''' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
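# Hedged standalone sketch of the <s_key>...</s_key> markup that tokenajson
# parses above; this toy version handles only flat, well-formed tags.
import re as _re

def _tiny_token2json(tokens: str) -> dict:
    out = {}
    for m in _re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens, _re.DOTALL):
        out[m.group(1)] = m.group(2).strip()
    return out

assert _tiny_token2json("<s_total>9.00</s_total><s_menu>latte</s_menu>") == {
    "total": "9.00",
    "menu": "latte",
}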
| 315 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase_ (PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = IFInpaintingSuperResolutionPipeline
__UpperCamelCase: Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
__UpperCamelCase: Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
__UpperCamelCase: Optional[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self : Optional[Any] ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self : List[str] , device : Any , seed : int=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _A ( self : Optional[Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _A ( self : List[str] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _A ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _A ( self : int ):
self._test_save_load_local()
def _A ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
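# Hedged standalone sketch of the device-aware seeding used in the inputs
# helper above: MPS has no per-device torch.Generator, so seeding falls back
# to the global RNG. Assumes torch is installed; the device is illustrative.
import torch as _torch

_device = "cpu"
if str(_device).startswith("mps"):
    _generator = _torch.manual_seed(0)
else:
    _generator = _torch.Generator(device=_device).manual_seed(0)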
| 31 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class lowerCamelCase_ (PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase: str = "encodec"
def __init__( self : Optional[int] , A : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , A : List[Any]=24000 , A : Union[str, Any]=1 , A : List[Any]=False , A : Optional[int]=None , A : int=None , A : str=128 , A : List[Any]=32 , A : List[Any]=1 , A : int=[8, 5, 4, 2] , A : Optional[int]="weight_norm" , A : List[Any]=7 , A : Any=7 , A : Dict=3 , A : Optional[int]=2 , A : Dict=True , A : Dict="reflect" , A : Any=2 , A : Dict=2 , A : str=1.0 , A : Optional[int]=1024 , A : Any=None , A : Any=True , **A : str , ):
_UpperCAmelCase : Optional[int] = target_bandwidths
_UpperCAmelCase : List[str] = sampling_rate
_UpperCAmelCase : Optional[int] = audio_channels
_UpperCAmelCase : str = normalize
_UpperCAmelCase : int = chunk_length_s
_UpperCAmelCase : str = overlap
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : int = num_filters
_UpperCAmelCase : Optional[Any] = num_residual_layers
_UpperCAmelCase : Optional[int] = upsampling_ratios
_UpperCAmelCase : int = norm_type
_UpperCAmelCase : List[Any] = kernel_size
_UpperCAmelCase : List[Any] = last_kernel_size
_UpperCAmelCase : List[Any] = residual_kernel_size
_UpperCAmelCase : List[str] = dilation_growth_rate
_UpperCAmelCase : Dict = use_causal_conv
_UpperCAmelCase : Tuple = pad_mode
_UpperCAmelCase : Tuple = compress
_UpperCAmelCase : List[str] = num_lstm_layers
_UpperCAmelCase : List[Any] = trim_right_ratio
_UpperCAmelCase : int = codebook_size
_UpperCAmelCase : Optional[Any] = codebook_dim if codebook_dim is not None else hidden_size
_UpperCAmelCase : Optional[int] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**A )
@property
    def chunk_length( self : Any ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self : Union[str, Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate( self : Union[str, Any] ):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers( self : str ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
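# Hedged worked example of the derived properties above, with illustrative
# values (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2], and
# target_bandwidths ending at 24.0):
import math as _math
import numpy as _np

_hop_length = int(_np.prod([8, 5, 4, 2]))          # 320 samples per frame
_frame_rate = _math.ceil(24_000 / _hop_length)     # 75 frames per second
_num_quantizers = int(1000 * 24.0 // (_frame_rate * 10))  # 32
assert (_hop_length, _frame_rate, _num_quantizers) == (320, 75, 32)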
| 31 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCAmelCase = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions( ):
    """simple docstring"""
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url ).read() )['''releases'''].keys()
    return sorted(releases , key=lambda x : version.Version(x ) )
def init_hf_modules( ):
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name ):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports( module_file ):
    """simple docstring"""
    with open(module_file , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files( module_file ):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports( filename ):
    """simple docstring"""
    with open(filename , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'''^\s*import\s+(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            f'{", ".join(missing_packages )}. Run `pip install {" ".join(missing_packages )}`' )
    return get_relative_imports(filename )
def get_class_in_module( class_name , module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep , '''.''' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class( loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.' )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path , module_file , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , ):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(f'Defaulting to latest_version: {revision}.' )
        elif revision in available_versions:
            revision = f'v{revision}'
        elif revision == "main":
            pass
        else:
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f' {", ".join(available_versions + ["main"] )}.' )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , f'{module_needed}.py' , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def get_class_from_dynamic_module( pretrained_model_name_or_path , module_file , class_name = None , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('''.py''' , '''''' ) )
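# Hedged sketch of the relative-import scan implemented above, applied to an
# inline string instead of a file on disk:
import re as _re

_src = "import os\nfrom .utils import helper\nimport .vae\n"
_rel = _re.findall(r"^\s*import\s+\.(\S+)\s*$", _src, flags=_re.MULTILINE)
_rel += _re.findall(r"^\s*from\s+\.(\S+)\s+import", _src, flags=_re.MULTILINE)
assert sorted(set(_rel)) == ["utils", "vae"]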
| 93 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig( PretrainedConfig ):
_lowercase : int = '''git_vision_model'''
def __init__( self: Tuple , UpperCamelCase_: Optional[int]=768 , UpperCamelCase_: Optional[int]=3_072 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Any=3 , UpperCamelCase_: str=224 , UpperCamelCase_: int=16 , UpperCamelCase_: Any="quick_gelu" , UpperCamelCase_: Union[str, Any]=1E-5 , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: Tuple=0.02 , **UpperCamelCase_: str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
    def from_pretrained( cls: Optional[Any] , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs: List[Any] ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''' ) == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
_lowercase : Optional[int] = '''git'''
def __init__( self: Optional[Any] , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: List[str]=30_522 , UpperCamelCase_: List[Any]=768 , UpperCamelCase_: List[Any]=6 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Any=3_072 , UpperCamelCase_: Any="gelu" , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: str=1_024 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: str=1E-1_2 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: Optional[Any]="absolute" , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[Any]=False , UpperCamelCase_: List[Any]=101 , UpperCamelCase_: int=102 , UpperCamelCase_: List[str]=None , **UpperCamelCase_: int , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
if vision_config is None:
lowercase__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
lowercase__ = GitVisionConfig(**UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = tie_word_embeddings
lowercase__ = num_image_with_embedding
lowercase__ = bos_token_id
lowercase__ = eos_token_id
    def to_dict( self: Dict ) -> Tuple:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
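# Hedged usage sketch (assumes an installed `transformers` exposing GitConfig,
# the upstream counterpart of the classes above): a nested vision config can
# be passed as a plain dict and round-trips through to_dict().
#
#   from transformers import GitConfig
#
#   config = GitConfig(vision_config={"hidden_size": 256, "num_hidden_layers": 4})
#   assert config.vision_config.hidden_size == 256
#   assert config.to_dict()["vision_config"]["num_hidden_layers"] == 4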
| 93 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__lowerCAmelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__lowerCAmelCase = {'''facebook/blenderbot-3B''': 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ) -> dict:
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BlenderbotTokenizer( PreTrainedTokenizer ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
lowercase__: List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
lowercase__: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
lowercase__: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
lowercase__: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
lowercase__: Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding='utf-8' ) as vocab_handle:
lowercase__: Optional[int] = json.load(lowerCAmelCase__ )
lowercase__: List[Any] = {v: k for k, v in self.encoder.items()}
lowercase__: Any = errors # how to handle errors in decoding
lowercase__: Dict = bytes_to_unicode()
lowercase__: Any = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding='utf-8' ) as merges_handle:
lowercase__: Optional[Any] = merges_handle.read().split('\n' )[1:-1]
lowercase__: int = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__: Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
lowercase__: Optional[int] = {}
lowercase__: Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__: Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ) -> Dict:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: Optional[int] = ''.join(lowerCAmelCase__ )
lowercase__: List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: str = [self.sep_token_id]
lowercase__: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: Optional[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowercase__: Union[str, Any] = ' ' + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Union[str, Any]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[int]:
'''simple docstring'''
lowercase__: int = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__ )
lowercase__: Any = ' '.join(lowerCAmelCase__ )
lowercase__: Optional[Any] = self.encode(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.model_max_length:
lowercase__: int = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
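# Hedged toy walk-through of the merge selection inside bpe() above: the
# candidate pair with the lowest merge rank is collapsed first. The word and
# ranks below are illustrative.
_word = ("h", "e", "l", "l", "o")
_ranks = {("h", "e"): 0, ("l", "l"): 1}
_pairs = {(a, b) for a, b in zip(_word, _word[1:])}
_best = min(_pairs, key=lambda p: _ranks.get(p, float("inf")))
assert _best == ("h", "e")  # rank 0 beats ("l", "l") at rank 1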
| 196 |
def different_signs(num_a: int, num_b: int) -> bool:
    """Return True iff ``num_a`` and ``num_b`` have opposite signs."""
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
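# Why the XOR trick works: in two's complement, num_a ^ num_b has its sign
# bit set exactly when the operands' sign bits differ, so the comparison
# against 0 detects opposite signs without branching.
assert different_signs(-3, 7) is True
assert different_signs(4, 9) is False
assert different_signs(-2, -8) is False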
| 196 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__UpperCamelCase : List[str] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
__UpperCamelCase : List[str] = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__UpperCamelCase : Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__UpperCamelCase : List[str] = sorted(arg_to_scheduler.keys())
__UpperCamelCase : Dict = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class BaseTransformer( pl.LightningModule ):
def __init__( self :List[Any] , __magic_name__ :Dict , __magic_name__ :Tuple=None , __magic_name__ :str="base" , __magic_name__ :Any=None , __magic_name__ :List[Any]=None , __magic_name__ :Optional[int]=None , **__magic_name__ :List[str] , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_SCREAMING_SNAKE_CASE )
a = 0
a = Path(self.hparams.output_dir )
a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
a = config
a = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , _SCREAMING_SNAKE_CASE ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _SCREAMING_SNAKE_CASE , getattr(self.hparams , _SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
a = tokenizer
a = MODEL_MODES[mode]
if model is None:
a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
a = model
def lowerCamelCase__ ( self :List[Any] , *__magic_name__ :str , **__magic_name__ :List[Any] ):
'''simple docstring'''
a = self.model_type.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = arg_to_scheduler[self.hparams.lr_scheduler]
a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
a = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.model
a = ["bias", "LayerNorm.weight"]
a = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
a = Adafactor(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=_SCREAMING_SNAKE_CASE , relative_step=_SCREAMING_SNAKE_CASE )
else:
a = AdamW(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
a = optimizer
a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[Any] , __magic_name__ :List[Any] ):
'''simple docstring'''
return self.validation_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :str , __magic_name__ :List[Any] ):
'''simple docstring'''
return self.validation_end(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Any ):
'''simple docstring'''
if stage == "test":
a = len(self.test_dataloader().dataset )
else:
a = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
a = len(self.train_dataloader().dataset )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Tuple = False ):
'''simple docstring'''
raise NotImplementedError("""You must implement this for your task""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
return self.train_loader
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self :int , __magic_name__ :Dict ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
_SCREAMING_SNAKE_CASE , list(filter(_SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def lowerCamelCase__ ( self :str , __magic_name__ :Tuple ):
'''simple docstring'''
a = self.output_dir.joinpath("""best_tfmr""" )
a = self.step_count
self.model.save_pretrained(_SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCamelCase__ ( __magic_name__ :List[str] , __magic_name__ :Tuple ):
'''simple docstring'''
parser.add_argument(
"""--model_name_or_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=_SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(_SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=_SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=_SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=_SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=_SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=_SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=_SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=_SCREAMING_SNAKE_CASE , metavar=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=_SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=_SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=_SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--train_batch_size""" , default=32 , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_batch_size""" , default=32 , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class InitCallback( pl.Callback ):
    def on_sanity_check_start( self , trainer , pl_module ):
        '''simple docstring'''
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback( pl.Callback ):
    def on_after_backward( self , trainer , pl_module ):
        '''simple docstring'''
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback( pl.Callback ):
    def on_batch_end( self , trainer , pl_module ):
        '''simple docstring'''
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def on_validation_end( self , trainer , pl_module ):
        '''simple docstring'''
        rank_zero_info("""***** Validation results *****""" )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )

    def on_test_end( self , trainer , pl_module ):
        '''simple docstring'''
        rank_zero_info("""***** Test results *****""" )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
        with open(output_test_results_file , """w""" ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )
                    writer.write("""{} = {}\n""".format(key , str(metrics[key] ) ) )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"""--output_dir""" , default=str(Path(_a ).parent / """test_run""" / """model_checkpoints""" ) , type=_a , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=_a , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=_a )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=_a , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=_a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=_a , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(_a ).parent / """test_run""" / """dummy-train-data""" ) , type=_a , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=[] , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Any:
pl.seed_everything(args.seed )
# init model
a = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_a )
# add custom checkpoints
if checkpoint_callback is None:
a = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_a )
if logging_callback is None:
a = LoggingCallback()
a = {}
if args.fpaa:
a = 16
if args.gpus > 1:
a = "auto"
a = "ddp"
a = args.accumulate_grad_batches
a = None
a = "auto"
a = pl.Trainer.from_argparse_args(
_a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , )
if args.do_train:
trainer.fit(_a )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
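# Hedged standalone sketch of the no-decay parameter grouping used in
# configure_optimizers above: bias and LayerNorm weights are excluded from
# weight decay by substring match on the parameter name. The module and
# hyperparameters below are illustrative; assumes torch is installed.
import torch as _torch
from torch import nn as _nn

class _Tiny(_nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = _nn.Linear(4, 4)
        self.LayerNorm = _nn.LayerNorm(4)

_model = _Tiny()
_no_decay = ["bias", "LayerNorm.weight"]
_groups = [
    {"params": [p for n, p in _model.named_parameters() if not any(nd in n for nd in _no_decay)],
     "weight_decay": 0.01},
    {"params": [p for n, p in _model.named_parameters() if any(nd in n for nd in _no_decay)],
     "weight_decay": 0.0},
]
_optimizer = _torch.optim.AdamW(_groups, lr=5e-5)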
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[Any] = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
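# Hedged sketch of what the _LazyModule indirection above provides (assumes
# transformers and torch are installed): importing the package itself is
# cheap, and the heavy framework submodule only loads on first attribute
# access.
#
#   import transformers.models.wav2vec2 as w2v2   # no torch-side import yet
#   model_cls = w2v2.Wav2Vec2Model                # triggers the lazy import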
| 347 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg)
    return sequence
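# Hedged trace of the three-way partition above: one left-to-right pass moves
# every 0 before `low` and every 2 after `high`, leaving the 1s in the middle.
#
#   dutch_national_flag_sort([2, 0, 1, 0, 2, 1])  ->  [0, 0, 1, 1, 2, 2]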
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = input('''Enter numbers separated by commas:\n''').strip()
lowerCAmelCase__ = [int(item.strip()) for item in user_input.split(''',''')]
print(f'''{dutch_national_flag_sort(unsorted)}''')
| 130 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@slow
    def snake_case ( self : List[str] ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228_168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 130 | 1 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
__snake_case = True
from torch.cuda.amp import autocast
__snake_case = logging.getLogger(__name__)
@dataclass
class lowercase :
"""simple docstring"""
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_feature_extractor : Optional[bool] = field(
        default=True , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    verbose_logging : Optional[bool] = field(
        default=False , metadata={'help': 'Whether to log verbose messages or not.'} , )
    max_gumbel_temperature : Optional[float] = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
    min_gumbel_temperature : Optional[float] = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
    gumbel_temperature_decay : Optional[float] = field(
        default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def configure_logger( model_args , training_args ) -> str:
    '''simple docstring'''
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
    dataset_name : str = field(
        default=None , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name : Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name : Optional[str] = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    validation_split_name : Optional[str] = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    speech_file_column : Optional[str] = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    overwrite_cache : bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    validation_split_percentage : Optional[int] = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    preprocessing_num_workers : Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_duration_in_seconds : Optional[float] = field(
        default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class DataCollatorForWavaVecaPretraining :
"""simple docstring"""
    model : WavaVecaForPreTraining
    feature_extractor : WavaVecaFeatureExtractor
    padding : Union[bool, str] = "longest"
    pad_to_multiple_of : Optional[int] = None
    max_length : Optional[int] = None
def __call__( self , UpperCamelCase_ ):
'''simple docstring'''
        batch = self.feature_extractor.pad(
            UpperCamelCase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
        batch_size = batch['''input_values'''].shape[0]
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=batch['''input_values'''].device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
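        # Aside (illustrative only, not part of the collator): the
        # flip/cumsum/flip idiom above turns a one-hot marker at position L-1
        # into a boolean mask that is True for every position < L.
        # With lengths = [2, 4] and width 5:
        #   m = torch.zeros(2, 5, dtype=torch.long)
        #   m[(torch.arange(2), torch.tensor([2, 4]) - 1)] = 1
        #   m = m.flip([-1]).cumsum(-1).flip([-1]).bool()
        #   -> [[True, True, False, False, False],
        #       [True, True, True, True, False]]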
class WavaVecaPreTrainer ( Trainer ):
"""simple docstring"""
    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model , inputs ):
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''mask_time_indices''']).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def main( ) -> List[str]:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm=\'layer\'``''' )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
    main()
 | 219 |
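The trainer above anneals the Gumbel-softmax temperature exponentially with a floor; a minimal standalone sketch of that schedule, using the defaults from the argument dataclass (2.0 max, 0.5 min, 0.999995 decay):
def gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    # exponential decay, clamped from below
    return max(max_temp * decay**step, min_temp)
assert gumbel_temperature(0) == 2.0
assert gumbel_temperature(10**9) == 0.5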
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption( __a ) -> Optional[int]:
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(__a )
def pytest_terminal_summary( terminalreporter ) -> str:
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
 | 219 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class snake_case_ ( ExplicitEnum ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = "char"
SCREAMING_SNAKE_CASE : Optional[Any] = "bpe"
SCREAMING_SNAKE_CASE : List[str] = "wp"
lowerCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class snake_case_ ( ProcessorMixin ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ["image_processor", "char_tokenizer"]
SCREAMING_SNAKE_CASE : str = "ViTImageProcessor"
SCREAMING_SNAKE_CASE : Any = "MgpstrTokenizer"
def __init__( self : Tuple , _UpperCamelCase : int=None , _UpperCamelCase : List[str]=None , **_UpperCamelCase : str ) ->Union[str, Any]:
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _UpperCamelCase , )
snake_case_ = kwargs.pop('''feature_extractor''' )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
snake_case_ = tokenizer
snake_case_ = AutoTokenizer.from_pretrained('''gpt2''' )
snake_case_ = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self : Optional[Any] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Union[str, Any]=None , **_UpperCamelCase : Any ) ->str:
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case_ = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
if text is not None:
snake_case_ = self.char_tokenizer(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case_ = encodings['''input_ids''']
return inputs
    def snake_case__( self : Optional[Any] , sequences : Any ) ->Optional[Any]:
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds , '''char''' )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , '''bpe''' )
        wp_strs, wp_scores = self._decode_helper(wp_preds , '''wp''' )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def snake_case__( self : Optional[int] , pred_logits : Any , format : Optional[Any] ) ->str:
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '''[s]'''
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '''#'''
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 1_0_2
            eos_str = '''[SEP]'''
        else:
            raise ValueError(f'''Format {format} is not supported.''' )
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def snake_case__( self : int , sequences : List[str] ) ->Any:
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def snake_case__( self : Optional[int] , sequences : List[Any] ) ->List[str]:
        return self.bpe_tokenizer.batch_decode(sequences )
    def snake_case__( self : Tuple , sequences : Union[str, Any] ) ->Dict:
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
 | 8 |
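The batch_decode above runs three decoding heads and keeps, per sample, the string whose cumulative-probability confidence is highest; the selection rule reduces to a max over (string, score) pairs, sketched here with made-up values:
def pick_best(candidates):
    # candidates: [(decoded_string, confidence), ...] from the char/bpe/wp heads
    return max(candidates, key=lambda pair: pair[1])
assert pick_best([("ticket", 0.71), ("ticke#", 0.40), ("ticket", 0.93)]) == ("ticket", 0.93)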
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'\s+')
def get_hash( example : str ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN, '''''', example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def line_stats( example : List[Any] ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example['''content'''].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example : Optional[Any] ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example['''content''']] )
    return {"alpha_frac": alpha_frac}
def check_uniques( example : Any, uniques : Any ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def is_autogenerated( example : Any, scan_width : Optional[Any]=5 ):
    '''simple docstring'''
    keywords = ['''auto-generated''', '''autogenerated''', '''automatically generated''']
    lines = example['''content'''].splitlines()
    for _, line in zip(range(scan_width ), lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test( example : Union[str, Any], scan_width : Tuple=5, coeff : Optional[Any]=0.0_5 ):
    '''simple docstring'''
    keywords = ['''unit tests''', '''test file''', '''configuration file''']
    lines = example['''content'''].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ), lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['''content'''].count('''\n''' )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords( example : Optional[int] ):
    '''simple docstring'''
    keywords = ['''def ''', '''class ''', '''for ''', '''while ''']
    lines = example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments( example : Any, minimum : Dict=4 ):
    '''simple docstring'''
    lines = example['''content'''].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio( example : List[str] ):
    '''simple docstring'''
    input_ids = tokenizer(example['''content'''], truncation=False )['''input_ids''']
    ratio = len(example['''content'''] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess( example : str ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter( example : Tuple, uniques : List[Any], args : str ):
    '''simple docstring'''
    if not check_uniques(example, uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file( file_path : str ):
    '''simple docstring'''
    with open(file_path, '''rb''' ) as f_in:
        with gzip.open(str(file_path ) + '''.gz''', '''wb''', compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in, f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter , duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(F'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
 | 152 | 0 |
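The exact-deduplication step above boils down to hashing whitespace-stripped content and keeping the first occurrence of each hash; a compact standalone version of that idea (toy documents, illustrative names):
import hashlib
import re
WS = re.compile(r"\s+")
def content_hash(text):
    return hashlib.md5(WS.sub("", text).encode("utf-8")).hexdigest()
seen, kept = set(), []
for doc in ["a b", "ab", "c"]:   # "a b" and "ab" collide once whitespace is stripped
    h = content_hash(doc)
    if h not in seen:
        seen.add(h)
        kept.append(doc)
assert kept == ["a b", "c"]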
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
__magic_name__ = """xlnet"""
__magic_name__ = ["""mems"""]
__magic_name__ = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , snake_case__=3_2000 , snake_case__=1024 , snake_case__=24 , snake_case__=16 , snake_case__=4096 , snake_case__="gelu" , snake_case__=True , snake_case__="bi" , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=0.1 , snake_case__=512 , snake_case__=None , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=-1 , snake_case__=False , snake_case__="last" , snake_case__=True , snake_case__="tanh" , snake_case__=0.1 , snake_case__=5 , snake_case__=5 , snake_case__=5 , snake_case__=1 , snake_case__=2 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Union[str, Any] = d_model
_lowerCAmelCase : Optional[Any] = n_layer
_lowerCAmelCase : List[str] = n_head
if d_model % n_head != 0:
raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
_lowerCAmelCase : Tuple = d_model // n_head
_lowerCAmelCase : Optional[int] = ff_activation
_lowerCAmelCase : int = d_inner
_lowerCAmelCase : Any = untie_r
_lowerCAmelCase : List[Any] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : int = dropout
_lowerCAmelCase : int = mem_len
_lowerCAmelCase : Optional[Any] = reuse_len
_lowerCAmelCase : List[Any] = bi_data
_lowerCAmelCase : List[Any] = clamp_len
_lowerCAmelCase : Tuple = same_length
_lowerCAmelCase : Optional[Any] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : int = summary_activation
_lowerCAmelCase : Any = summary_last_dropout
_lowerCAmelCase : Dict = start_n_top
_lowerCAmelCase : Dict = end_n_top
_lowerCAmelCase : Dict = bos_token_id
_lowerCAmelCase : Tuple = pad_token_id
_lowerCAmelCase : Union[str, Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , __lowerCAmelCase , )
_lowerCAmelCase : Optional[int] = kwargs['use_cache']
_lowerCAmelCase : str = use_mems_eval
_lowerCAmelCase : List[str] = use_mems_train
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
    def max_position_embeddings( self ):
'''simple docstring'''
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , snake_case__ ):
'''simple docstring'''
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 370 |
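The config above enforces that `d_model` is divisible by `n_head` and derives the per-head width from the quotient; the arithmetic in isolation (values chosen for illustration):
d_model, n_head = 1024, 16       # xlnet-base-like sizes
assert d_model % n_head == 0     # the condition the constructor raises on
d_head = d_model // n_head
assert d_head == 64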
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
    def src_lang( self , new_src_lang ):
        '''simple docstring'''
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 25 | 0 |
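The two set_*_lang_special_tokens methods above differ only in where the language code lands relative to the end-of-sequence token; a minimal sketch of the placement rule (token ids are made up):
def special_tokens(lang_code_id, eos_id, legacy_behaviour):
    # returns (prefix_tokens, suffix_tokens) in NLLB style
    if legacy_behaviour:
        return [], [eos_id, lang_code_id]   # code appended after </s>
    return [lang_code_id], [eos_id]         # code prepended before the text
assert special_tokens(7, 2, legacy_behaviour=True) == ([], [2, 7])
assert special_tokens(7, 2, legacy_behaviour=False) == ([7], [2])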
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x ): # picklable for multiprocessing
    """simple docstring"""
    return x.sum()
def add_one ( i ): # picklable for multiprocessing
    """simple docstring"""
    return i + 1
@dataclass
class a :
_snake_case : int
_snake_case : str
class a ( TestCase ):
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = {}
_UpperCAmelCase = []
_UpperCAmelCase = 1
_UpperCAmelCase = [1, 2]
_UpperCAmelCase = {"a": 1, "b": 2}
_UpperCAmelCase = {"a": [1, 2], "b": [3, 4]}
_UpperCAmelCase = {"a": {"1": 1}, "b": 2}
_UpperCAmelCase = {"a": 1, "b": 2, "c": 3, "d": 4}
_UpperCAmelCase = {}
_UpperCAmelCase = []
_UpperCAmelCase = 2
_UpperCAmelCase = [2, 3]
_UpperCAmelCase = {"a": 2, "b": 3}
_UpperCAmelCase = {"a": [2, 3], "b": [4, 5]}
_UpperCAmelCase = {"a": {"1": 2}, "b": 3}
_UpperCAmelCase = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
_UpperCAmelCase = 2
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
_UpperCAmelCase = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_UpperCAmelCase = {"a": 2, "b": 0, "c": 2}
_UpperCAmelCase = {
"a": np.eye(2 ).astype(A_ ),
"b": np.zeros(3 ).astype(A_ ),
"c": np.ones(2 ).astype(A_ ),
}
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(A_ ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , A_ , num_proc=A_ )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = {"a": 1, "b": 2}
_UpperCAmelCase = {"a": 3, "b": 4}
_UpperCAmelCase = {"a": 5, "b": 6}
_UpperCAmelCase = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
def lowerCAmelCase_ ( self : Any ):
class a :
_snake_case : int = 'bar'
_UpperCAmelCase = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(A_ , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __UpperCAmelCase ( iterable_length ,num_proc ,expected_num_proc ):
    """simple docstring"""
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        _UpperCAmelCase = {f'''{i}''': i for i in range(iterable_length )}
        _UpperCAmelCase = map_nested(lambda x : x + 10 ,_UpperCAmelCase ,num_proc=num_proc ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class a ( TestCase ):
@require_tf
def lowerCAmelCase_ ( self : Optional[int] ):
import tensorflow as tf
from tensorflow.keras import layers
_UpperCAmelCase = layers.Dense(2 )
def gen_random_output():
_UpperCAmelCase = tf.random.uniform((1, 3) )
return model(A_ ).numpy()
with temp_seed(42 , set_tensorflow=A_ ):
_UpperCAmelCase = gen_random_output()
with temp_seed(42 , set_tensorflow=A_ ):
_UpperCAmelCase = gen_random_output()
_UpperCAmelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
import torch
def gen_random_output():
_UpperCAmelCase = torch.nn.Linear(3 , 2 )
_UpperCAmelCase = torch.rand(1 , 3 )
return model(A_ ).detach().numpy()
with temp_seed(42 , set_pytorch=A_ ):
_UpperCAmelCase = gen_random_output()
with temp_seed(42 , set_pytorch=A_ ):
_UpperCAmelCase = gen_random_output()
_UpperCAmelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def lowerCAmelCase_ ( self : Optional[Any] ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
_UpperCAmelCase = gen_random_output()
with temp_seed(42 ):
_UpperCAmelCase = gen_random_output()
_UpperCAmelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" ,[{}] )
def __UpperCAmelCase ( input_data ):
    """simple docstring"""
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" ,[
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] ,)
def __UpperCAmelCase ( data ,expected_output ):
    """simple docstring"""
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = A(x=1 ,y="""foobar""" )
_UpperCAmelCase = {"x": 1, "y": "foobar"}
assert asdict(_lowerCAmelCase ) == expected_output
_UpperCAmelCase = {"a": {"b": A(x=10 ,y="""foo""" )}, "c": [A(x=20 ,y="""bar""" )]}
_UpperCAmelCase = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(_lowerCAmelCase ) == expected_output
with pytest.raises(_lowerCAmelCase ):
asdict([1, A(x=10 ,y="""foo""" )] )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return text.split()
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __UpperCAmelCase ( ):
"""simple docstring"""
with Pool(2 ) as pool:
_UpperCAmelCase = list(iflatmap_unordered(_lowerCAmelCase ,_split_text ,kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(_lowerCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_UpperCAmelCase = list(iflatmap_unordered(_lowerCAmelCase ,_split_text ,kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(_lowerCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_UpperCAmelCase = []
for yield_time, content in iflatmap_unordered(
_lowerCAmelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(_lowerCAmelCase )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(_lowerCAmelCase ) == 4
| 289 |
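For orientation, `map_nested` applies a function over arbitrarily nested lists and dicts while preserving structure; a quick usage sketch (assuming the same import path as the tests above):
from datasets.utils.py_utils import map_nested
nested = {"a": [1, 2], "b": {"c": 3}}
assert map_nested(lambda x: x + 1, nested) == {"a": [2, 3], "b": {"c": 4}}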
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCamelCase : Union[str, Any] = pytest.mark.integration
@require_faiss
class A__ ( TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} )
return dset
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
UpperCamelCase : List[Any] = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def __UpperCamelCase( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
UpperCamelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase : List[str] = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
UpperCamelCase : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=A_ )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class A__ ( TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : Optional[Any] = 1
UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1]
UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores]
UpperCamelCase : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dict = faiss.IndexFlat(5 )
UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
UpperCamelCase : int = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase : str = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : int = 1
UpperCamelCase , UpperCamelCase : Dict = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def A_ ( _lowerCAmelCase ) -> Optional[int]:
import faiss
UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCamelCase : List[Any] = "index.faiss"
UpperCamelCase : List[str] = F"""mock://{index_name}"""
index.save(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : Optional[int] = 1
UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A__ ( TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase : List[str] = Elasticsearch()
UpperCamelCase : Union[str, Any] = {"acknowledged": True}
UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
UpperCamelCase : str = "foo"
UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
UpperCamelCase : Dict = "foo"
UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
UpperCamelCase : Dict = ["foo", "bar", "foobar"]
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ )
UpperCamelCase : str = [scores[0] for scores in total_scores]
UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
# batched queries with timeout
UpperCamelCase : int = ["foo", "bar", "foobar"]
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , request_timeout=30 )
UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores]
UpperCamelCase : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
| 52 | 0 |
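A minimal end-to-end use of the Faiss integration the tests above exercise, assuming `faiss` is installed (dataset and vectors are toy values):
import numpy as np
from datasets import Dataset
ds = Dataset.from_dict({"text": ["a", "b"], "vecs": [[1.0, 0.0], [0.0, 1.0]]})
ds.add_faiss_index(column="vecs")
scores, examples = ds.get_nearest_examples("vecs", np.array([0.9, 0.1], dtype=np.float32), k=1)
assert examples["text"] == ["a"]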
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
snake_case_ : List[Any] = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def A (path : str , tmp_path : Tuple ) -> Union[str, Any]:
    """simple docstring"""
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def A (path : int , tmp_path : Dict ) -> Dict:
    """simple docstring"""
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def A (path : Any , config_name : Optional[Any] , expected_splits : Optional[int] ) -> Optional[Any]:
    """simple docstring"""
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def A (path : int , config_name : Union[str, Any] , expected_exception : Optional[Any] ) -> List[Any]:
    """simple docstring"""
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def A (__A : int , __A : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def A (__A : List[str] , __A : Any , __A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def A (__A : Any , __A : Optional[Any] , __A : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_infos(__A )
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def A (__A : Union[str, Any] , __A : Tuple , __A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
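# Hedged interactive example, not part of the test module: the helpers under
# test can also be called directly; the output shown assumes the Hub datasets
# are unchanged.
# from datasets import get_dataset_split_names
# get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']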
| 7 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in reverse sorted order so the head ends up holding the minimum.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 7 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __lowercase ( _a ):
snake_case_ : List[Any] = []
snake_case_ : Optional[int] = []
snake_case_ : List[Any] = []
for rt in rc.restypes:
snake_case_ : Optional[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
snake_case_ : Union[str, Any] = {name: i for i, name in enumerate(_lowerCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
snake_case_ : Tuple = torch.tensor(
_lowerCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
snake_case_ : Optional[int] = torch.tensor(
_lowerCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
snake_case_ : str = torch.tensor(
_lowerCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
snake_case_ : int = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
snake_case_ : Optional[int] = restype_atomaa_to_atomaa[protein_aatype]
snake_case_ : int = restype_atomaa_mask[protein_aatype]
snake_case_ : Optional[Any] = residx_atomaa_mask
snake_case_ : int = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
snake_case_ : Optional[int] = restype_atomaa_to_atomaa[protein_aatype]
snake_case_ : Union[str, Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
snake_case_ : Optional[int] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
snake_case_ : str = rc.restype_atoa[restype_letter]
snake_case_ : str = rc.residue_atoms[restype_name]
for atom_name in atom_names:
snake_case_ : List[str] = rc.atom_order[atom_name]
snake_case_ : Optional[Any] = 1
snake_case_ : Optional[Any] = restype_atomaa_mask[protein_aatype]
snake_case_ : Tuple = residx_atomaa_mask
return protein
def __lowercase ( _a ):
snake_case_ : Tuple = tree_map(lambda _a : torch.tensor(_lowerCAmelCase , device=batch['''aatype'''].device ) , _lowerCAmelCase , np.ndarray )
snake_case_ : Dict = tensor_tree_map(lambda _a : np.array(_lowerCAmelCase ) , make_atomaa_masks(_lowerCAmelCase ) )
return out
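# Hedged usage sketch, not from the source file: only an `aatype` tensor of
# residue-type indices is needed as input; the shapes shown assume a
# hypothetical 5-residue chain.
# protein = make_atom14_masks({"aatype": torch.tensor([0, 1, 2, 3, 4])})
# protein["atom14_atom_exists"].shape       # torch.Size([5, 14])
# protein["residx_atom14_to_atom37"].shape  # torch.Size([5, 14])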
| 264 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # Classic O(n^3) relaxation: allow each node k as an intermediate hop.
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 169 | 0 |
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
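    # Hedged illustrative check, not in the original demo: before any scheduling,
    # calculate_remaining_burst_time_of_processes reports each full burst time.
    # fresh = deque([Process("P5", 0, 10), Process("P6", 0, 4)])
    # MLFQ(3, [17, 25], fresh, 0).calculate_remaining_burst_time_of_processes(fresh)  # -> [10, 4]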
| 357 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
a : Optional[Any] = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowercase )
a : Optional[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_lowercase )
env_command_parser(subparsers=_lowercase )
launch_command_parser(subparsers=_lowercase )
tpu_command_parser(subparsers=_lowercase )
test_command_parser(subparsers=_lowercase )
# Let's go
a : int = parser.parse_args()
if not hasattr(_lowercase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowercase )
if __name__ == "__main__":
main()
| 79 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
__UpperCAmelCase =parser.parse_args()
__UpperCAmelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 67 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
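# Hedged usage sketch, not part of the module; the checkpoint name and the
# `document_image` variable are assumptions for illustration only.
# from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizerFast
# processor = LayoutXLMProcessor(
#     image_processor=LayoutLMv2ImageProcessor(apply_ocr=True),
#     tokenizer=LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base"),
# )
# encoding = processor(images=document_image, return_tensors="pt")  # input_ids, bbox, image, ...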
| 67 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
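# Hedged sketch of a concrete subcommand (illustrative, not from the source):
# `parser` here is the subparsers action that the top-level CLI passes in.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="print a message and exit")
        echo_parser.add_argument("message", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message: str):
        self.message = message

    def run(self):
        print(self.message)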
| 173 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    # Implements linear decay of the learning rate after the warmup phase.
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
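# Hedged usage sketch, not from the source file: 10% of training steps used as
# warmup, decaying linearly to zero, with decoupled weight decay enabled.
# optimizer, schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
# )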
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        # Creates an optimizer from its config with WarmUp as a custom object.
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        # Retrieves the learning rate with the given state.
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        # Whether to apply decoupled weight decay to `param_name`.
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        # Number of accumulated steps; the variable is created lazily.
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
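# Hedged usage sketch, not from the source file: accumulate several
# micro-batches, then apply once; `grads_per_micro_batch`, `optimizer` and
# `model` are assumed to exist.
# accumulator = GradientAccumulator()
# for micro_batch_grads in grads_per_micro_batch:
#     accumulator(micro_batch_grads)
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()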
| 173 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        # Point the checker at the temporary copy of the repo layout.
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 250 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
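# Hedged instantiation sketch; the hyperparameter values below are
# illustrative assumptions, not taken from any released checkpoint.
# encoder = SpectrogramNotesEncoder(
#     max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#     num_layers=12, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
# )
# tokens = torch.randint(0, 1536, (1, 2048))
# mask = torch.ones(1, 2048, dtype=torch.long)
# encodings, mask = encoder(tokens, mask)  # encodings: (1, 2048, 768)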
| 205 | 0 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 351 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,  # name reconstructed; the original signature carried an unassigned 32 here
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: List[Any] = NezhaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
lowercase__: Union[str, Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
lowercase__: str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
lowercase__: Dict = True
lowercase__: Optional[Any] = NezhaModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: List[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
lowercase__: str = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )
lowercase__: List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: Tuple = NezhaForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
lowercase__: Any = NezhaForNextSentencePrediction(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
lowercase__: Union[str, Any] = NezhaForPreTraining(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: List[str] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , next_sentence_label=lowerCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
lowercase__: List[Any] = NezhaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: List[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[Any] = self.num_labels
lowercase__: List[Any] = NezhaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = self.num_labels
lowercase__: Dict = NezhaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: List[str] = self.num_choices
lowercase__: str = NezhaForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__: Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
__lowercase : List[Any] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__lowercase : Any = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : Dict = True
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[Any]:
'''simple docstring'''
lowercase__: Any = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
lowercase__: int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ )
lowercase__: Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: List[Any] = NezhaModelTester(self )
lowercase__: Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: List[str] = NezhaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ , lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowercase__: Optional[int] = True
lowercase__: Optional[int] = model_class(config=lowerCAmelCase__ )
lowercase__: Dict = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[Any] = torch.jit.trace(
lowerCAmelCase__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , 'bert.pt' ) )
lowercase__: List[str] = torch.jit.load(os.path.join(lowerCAmelCase__ , 'bert.pt' ) , map_location=lowerCAmelCase__ )
loaded(inputs_dict['input_ids'].to(lowerCAmelCase__ ) , inputs_dict['attention_mask'].to(lowerCAmelCase__ ) )
@require_torch
class __a ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
lowercase__: Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase__: Dict = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__: Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
lowercase__: Optional[Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , lowerCAmelCase__ )
lowercase__: Optional[Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: int = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
lowercase__: Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase__: Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__: Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
lowercase__: int = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , lowerCAmelCase__ )
lowercase__: Union[str, Any] = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4 ) )
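A side note on the pattern used in these integration tests: comparing only a small slice of the output against hard-coded values keeps the expected tensor readable while still catching numeric regressions. A self-contained sketch of the same check with a synthetic tensor (all values below are invented for illustration and require only PyTorch):

import torch

# Fake "model output" with a known layout: batch 1, sequence 6, hidden 8,
# where element [0, i, j] equals (8 * i + j) / 100.
output = torch.arange(48, dtype=torch.float32).reshape(1, 6, 8) / 100
# Hand-computed expectation for the [:, 1:4, 1:4] slice of that tensor.
expected_slice = torch.tensor([[[0.09, 0.10, 0.11], [0.17, 0.18, 0.19], [0.25, 0.26, 0.27]]])
assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)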
| 288 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
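For context on what `_LazyModule` does here: attribute access on the package triggers the real submodule import, so importing the package itself stays cheap. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); the module and attribute names below are placeholders, not real ones:

# lazy_pkg.py -- minimal illustration of deferred imports (PEP 562).
# "heavy_module" and "HeavyClass" are hypothetical names for this sketch.
import importlib
from typing import Any

_import_structure = {"heavy_module": ["HeavyClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name: str) -> Any:
    # Called only when `name` is not already in the module namespace, so the
    # submodule is imported on first use rather than at package import time.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")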
| 88 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
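The selection rule above is simply "first installed backend wins". A stripped-down, self-contained sketch of that availability-driven pattern, with two toy classes standing in for the real Optuna/Ray integrations:

# Minimal re-creation of the backend-selection pattern; both classes are
# invented stand-ins, not the actual integrations.
class FakeOptuna:
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return False  # pretend the package is missing


class FakeRay:
    name = "ray"

    @staticmethod
    def is_available() -> bool:
        return True


ALL_BACKENDS = [FakeOptuna, FakeRay]


def pick_default_backend() -> str:
    # Mirrors default_hp_search_backend above: first installed backend wins.
    available = [b for b in ALL_BACKENDS if b.is_available()]
    if not available:
        raise RuntimeError("No hyperparameter search backend available.")
    return available[0].name


assert pick_default_backend() == "ray"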
| 326 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
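The only non-obvious piece above is the environment-variable fallback for the default level. That rule, reduced to plain stdlib logging (the variable name "MYLIB_VERBOSITY" and logger name "mylib" below are made up for the sketch):

import logging
import os

toy_log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}


def default_level(env_var: str = "MYLIB_VERBOSITY") -> int:
    # Same fallback rule as _get_default_logging_level above: unset or
    # unknown values quietly resolve to WARNING instead of raising.
    value = os.getenv(env_var)
    if value in toy_log_levels:
        return toy_log_levels[value]
    return logging.WARNING


logging.getLogger("mylib").setLevel(default_level())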
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _SCREAMING_SNAKE_CASE (self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def _SCREAMING_SNAKE_CASE (self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0]
        return len(token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
        out_string = "".join(snake_case__ ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
    def _SCREAMING_SNAKE_CASE (self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return token_ids_a + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_a + sep + token_ids_b + sep
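The fairseq_offset bookkeeping in this tokenizer shifts every SentencePiece id to make room for the special and [unused] tokens placed at the front of the vocabulary. A toy illustration of that id-mapping scheme with an invented piece table, so no SentencePiece install is needed:

# Toy model of the offset scheme above; the spm piece table is made up.
fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):  # reserve [unused0] .. [unused9] at ids 5..14
    fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i
fairseq_offset = 12  # a "real" spm piece lands at (its spm id + 12)

fake_spm = {"▁hello": 3, "▁world": 7}  # pretend SentencePiece ids


def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = fake_spm.get(token, 0)  # 0 means "unknown" in spm
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["[UNK]"]


assert token_to_id("[PAD]") == 0
assert token_to_id("▁hello") == 15  # spm id 3 shifted past the reserved block
assert token_to_id("missing") == 3  # falls back to [UNK]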
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
def __init__( self : List[Any] , __snake_case : Any , __snake_case : List[str]=3 , __snake_case : str=32 , __snake_case : Tuple=3 , __snake_case : Dict=10 , __snake_case : List[Any]=[10, 20, 30, 40] , __snake_case : List[Any]=[1, 1, 2, 1] , __snake_case : Any=True , __snake_case : Dict=True , __snake_case : Any="relu" , __snake_case : Union[str, Any]=3 , __snake_case : List[str]=None , ) -> List[str]:
UpperCAmelCase : int = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : List[str] = embeddings_size
UpperCAmelCase : List[str] = hidden_sizes
UpperCAmelCase : int = depths
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Union[str, Any] = num_labels
UpperCAmelCase : str = scope
UpperCAmelCase : str = len(__snake_case )
def A ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A ( self : int ) -> Tuple:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[Any] ) -> List[Any]:
UpperCAmelCase : int = TFResNetModel(config=__snake_case )
UpperCAmelCase : Tuple = model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = TFResNetForImageClassification(__snake_case )
UpperCAmelCase : Tuple = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : str ) -> Tuple:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : List[Any] = TFResNetModelTester(self )
UpperCAmelCase : str = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A ( self : List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Dict ) -> Dict:
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def A ( self : str ) -> Dict:
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def A ( self : Dict ) -> Any:
pass
def A ( self : Optional[int] ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(__snake_case )
UpperCAmelCase : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def A ( self : Dict ) -> str:
def check_hidden_states_output(__snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] ):
UpperCAmelCase : List[Any] = model_class(__snake_case )
UpperCAmelCase : List[str] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
UpperCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : int = layer_type
UpperCAmelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def A ( self : Tuple ) -> int:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def A ( self : str ) -> Optional[Any]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[int] = TFResNetModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case_ ( ) -> List[str]:
UpperCAmelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : str ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = image_processor(images=__snake_case , return_tensors='''tf''' )
# forward pass
UpperCAmelCase : Dict = model(**__snake_case )
# verify the logits
UpperCAmelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __snake_case )
UpperCAmelCase : Any = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __snake_case , atol=1E-4 ) )
| 23 |
'''simple docstring'''
_A : Union[str, Any] =range(2, 20 + 1)
_A : List[str] =[10**k for k in range(ks[-1] + 1)]
_A : dict[int, dict[int, list[list[int]]]] ={}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : List[str] = sum(a_i[j] for j in range(UpperCamelCase , len(UpperCamelCase ) ) )
lowerCamelCase__ : int = sum(a_i[j] * base[j] for j in range(min(len(UpperCamelCase ) , UpperCamelCase ) ) )
lowerCamelCase__ , lowerCamelCase__ : Dict = 0, 0
lowerCamelCase__ : List[str] = n - i
lowerCamelCase__ : Optional[Any] = memo.get(UpperCamelCase )
if sub_memo is not None:
lowerCamelCase__ : str = sub_memo.get(UpperCamelCase )
if jumps is not None and len(UpperCamelCase ) > 0:
# find and make the largest jump without going over
lowerCamelCase__ : Optional[Any] = -1
for _k in range(len(UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCamelCase__ : Dict = _k
break
if max_jump >= 0:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCamelCase__ : Dict = diff + c
for j in range(min(UpperCamelCase , len(UpperCamelCase ) ) ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = divmod(UpperCamelCase , 10 )
if new_c > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
lowerCamelCase__ : Any = []
else:
lowerCamelCase__ : str = {c: []}
lowerCamelCase__ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCamelCase__ , lowerCamelCase__ : Dict = next_term(UpperCamelCase , k - 1 , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = compute(UpperCamelCase , UpperCamelCase , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
lowerCamelCase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCamelCase__ : List[Any] = 0
while j < len(UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
if i >= n:
return 0, i
if k > len(UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCamelCase__ : Optional[Any] = i
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 0, 0, 0
for j in range(len(UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCamelCase__ : Optional[int] = ds_c + ds_b
diff += addend
lowerCamelCase__ : int = 0
for j in range(UpperCamelCase ):
lowerCamelCase__ : str = a_i[j] + addend
lowerCamelCase__ , lowerCamelCase__ : int = divmod(UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
for j in range(UpperCamelCase , len(UpperCamelCase ) ):
lowerCamelCase__ : List[Any] = digits[j] + addend
if s >= 10:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = divmod(UpperCamelCase , 10 )
lowerCamelCase__ : Any = addend // 10 + quotient
else:
lowerCamelCase__ : Any = s
lowerCamelCase__ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
lowerCamelCase__ , lowerCamelCase__ : Any = divmod(UpperCamelCase , 10 )
digits.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 10**15 ) -> int:
lowerCamelCase__ : Any = [1]
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Tuple = 0
while True:
lowerCamelCase__ , lowerCamelCase__ : Any = next_term(UpperCamelCase , 20 , i + dn , UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
lowerCamelCase__ : Union[str, Any] = 0
for j in range(len(UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'{solution() = }')
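For orientation: this appears to solve Project Euler 551, the digit-sum sequence a(1) = 1, a(n+1) = a(n) + digitsum(a(n)), evaluated at n = 10^15 via the memoized jump table above. A brute-force reference is useful for cross-checking the fast solver on small n (written independently of the code above):

def digit_sum(x: int) -> int:
    return sum(int(d) for d in str(x))


def a_n_bruteforce(n: int) -> int:
    # a(1) = 1 and a(k+1) = a(k) + digit_sum(a(k)); returns a(n).
    a = 1
    for _ in range(n - 1):
        a += digit_sum(a)
    return a


# The sequence begins 1, 2, 4, 8, 16, 23, ...; the jump-table solver should
# agree with this loop for any n small enough to iterate directly.
assert [a_n_bruteforce(k) for k in range(1, 7)] == [1, 2, 4, 8, 16, 23]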
| 41 | 0 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=sys.maxsize ):
"""simple docstring"""
snake_case = 'bilinear'
snake_case = max_size
snake_case = short_edge_length
def __call__( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = []
for img in imgs:
snake_case ,snake_case = img.shape[:2]
# later: provide list and randomly choose index for resize
snake_case = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
snake_case = size * 1.0 / min(lowerCAmelCase , lowerCAmelCase )
if h < w:
snake_case ,snake_case = size, scale * w
else:
snake_case ,snake_case = scale * h, size
if max(lowerCAmelCase , lowerCAmelCase ) > self.max_size:
snake_case = self.max_size * 1.0 / max(lowerCAmelCase , lowerCAmelCase )
snake_case = newh * scale
snake_case = neww * scale
snake_case = int(neww + 0.5 )
snake_case = int(newh + 0.5 )
if img.dtype == np.uinta:
snake_case = Image.fromarray(lowerCAmelCase )
snake_case = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
snake_case = np.asarray(lowerCAmelCase )
else:
snake_case = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
snake_case = nn.functional.interpolate(
lowerCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase ).squeeze(0 )
img_augs.append(lowerCAmelCase )
return img_augs
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
snake_case = cfg.INPUT.FORMAT
snake_case = cfg.SIZE_DIVISIBILITY
snake_case = cfg.PAD_VALUE
snake_case = cfg.INPUT.MAX_SIZE_TEST
snake_case = cfg.MODEL.DEVICE
snake_case = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case = lambda lowerCAmelCase : (x - self.pixel_mean) / self.pixel_std
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = tuple(max(lowerCAmelCase ) for s in zip(*[img.shape for img in images] ) )
snake_case = [im.shape[-2:] for im in images]
snake_case = [
nn.functional.pad(
lowerCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase , lowerCAmelCase )
]
return torch.stack(lowerCAmelCase ), torch.tensor(lowerCAmelCase )
def __call__( self , lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
with torch.no_grad():
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
snake_case = [images]
if single_image:
assert len(lowerCAmelCase ) == 1
for i in range(len(lowerCAmelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase , images.pop(lowerCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
snake_case = torch.tensor([im.shape[:2] for im in images] )
snake_case = self.aug(lowerCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
snake_case = [self.normalizer(lowerCAmelCase ) for x in images]
# now pad them to do the following operations
snake_case ,snake_case = self.pad(lowerCAmelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
snake_case = torch.true_divide(lowerCAmelCase , lowerCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple[int, int] ) -> Dict:
"""simple docstring"""
assert torch.isfinite(_UpperCamelCase ).all(), "Box tensor contains infinite or NaN!"
snake_case ,snake_case = box_size
tensor[:, 0].clamp_(min=0 , max=_UpperCamelCase )
tensor[:, 1].clamp_(min=0 , max=_UpperCamelCase )
tensor[:, 2].clamp_(min=0 , max=_UpperCamelCase )
tensor[:, 3].clamp_(min=0 , max=_UpperCamelCase )
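The core of ResizeShortestEdge is pure arithmetic: scale so the short side hits the target size, then shrink again if the long side would exceed max_size. Isolated from torch and PIL it looks like this (a sketch mirroring the logic above, not shared library code):

from typing import Tuple


def resize_shortest_edge(h: int, w: int, size: int, max_size: int) -> Tuple[int, int]:
    # Scale so min(h, w) becomes `size`, keeping the aspect ratio.
    scale = size * 1.0 / min(h, w)
    if h < w:
        newh, neww = size, scale * w
    else:
        newh, neww = scale * h, size
    # Rescale down if the longer side overflows max_size.
    if max(newh, neww) > max_size:
        shrink = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * shrink, neww * shrink
    return int(newh + 0.5), int(neww + 0.5)


assert resize_shortest_edge(480, 640, 600, 1000) == (600, 800)
assert resize_shortest_edge(480, 640, 600, 700) == (525, 700)  # clamped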
| 149 | """simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
pass
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = data
snake_case = None
def __iter__( self ):
"""simple docstring"""
snake_case = self
snake_case = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCAmelCase )
yield node.data
snake_case = node.next_node
@property
def snake_case ( self ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = Node(1)
SCREAMING_SNAKE_CASE__ = Node(2)
SCREAMING_SNAKE_CASE__ = Node(3)
SCREAMING_SNAKE_CASE__ = Node(4)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE__ = root_node.next_node
print(root_node.has_loop) # True
SCREAMING_SNAKE_CASE__ = Node(5)
SCREAMING_SNAKE_CASE__ = Node(6)
SCREAMING_SNAKE_CASE__ = Node(5)
SCREAMING_SNAKE_CASE__ = Node(6)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE__ = Node(1)
print(root_node.has_loop) # False
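The has_loop property above trades O(n) extra memory for simplicity by recording every visited node. Floyd's tortoise-and-hare algorithm detects the same cycles in O(1) space; a sketch written against the Node class defined above:

def has_loop_floyd(head: Node) -> bool:
    # Slow advances one node per step, fast advances two; if there is a
    # cycle, the fast pointer eventually catches the slow one inside it.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False


print(has_loop_floyd(root_node))  # False for the acyclic list built above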
| 149 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class __UpperCamelCase ( UpperCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = "dpr"
def __init__( self : List[str] , _A : Any=3_0522 , _A : List[str]=768 , _A : int=12 , _A : int=12 , _A : List[str]=3072 , _A : Dict="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : int=2 , _A : List[str]=0.02 , _A : List[str]=1e-12 , _A : Dict=0 , _A : List[str]="absolute" , _A : int = 0 , **_A : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : str = hidden_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : str = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Any = type_vocab_size
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : int = projection_dim
__SCREAMING_SNAKE_CASE : str = position_embedding_type
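Most of this config class just stores constructor kwargs as attributes. A reduced sketch of that PretrainedConfig-style pattern in plain Python (no transformers dependency; field names and defaults here are illustrative only):

class TinyConfig:
    model_type = "tiny"

    def __init__(self, hidden_size: int = 768, projection_dim: int = 0, **kwargs):
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        # PretrainedConfig-style tolerance: unknown kwargs become attributes
        # instead of raising, so configs stay forward-compatible.
        for key, value in kwargs.items():
            setattr(self, key, value)


cfg = TinyConfig(projection_dim=128, layer_norm_eps=1e-12)
assert cfg.hidden_size == 768 and cfg.projection_dim == 128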
| 303 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ : str = logging.get_logger(__name__)
lowercase_ : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase_ : Optional[Any] = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
lowercase_ : int = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : Optional[int] = VOCAB_FILES_NAMES
snake_case_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : Tuple = ["input_ids", "attention_mask"]
snake_case_ : str = GPTaTokenizer
def __init__( self : List[str] , snake_case__ : Any=None , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]="<|endoftext|>" , snake_case__ : str="<|endoftext|>" , snake_case__ : Optional[int]="<|endoftext|>" , snake_case__ : str=False , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
_UpperCAmelCase = kwargs.pop("add_bos_token" , snake_case__ )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
_UpperCAmelCase = getattr(snake_case__ , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**snake_case__ )
_UpperCAmelCase = add_prefix_space
def UpperCamelCase ( self : Union[str, Any] , *snake_case__ : int , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : int , *snake_case__ : Optional[Any] , **snake_case__ : List[str] ):
"""simple docstring"""
_UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
_UpperCAmelCase = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : Dict , snake_case__ : "Conversation" ):
"""simple docstring"""
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
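The conversation builder above appends an eos token after every turn and then keeps only the newest model_max_length tokens, so the oldest context falls away first. That truncation rule in isolation:

from typing import List


def clip_to_window(input_ids: List[int], model_max_length: int) -> List[int]:
    # Keep the most recent tokens; older turns drop off the front.
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids


assert clip_to_window(list(range(10)), 4) == [6, 7, 8, 9]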
| 133 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__snake_case = 25_00_04
__snake_case = 25_00_20
@require_sentencepiece
@require_tokenizers
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = MBartaaTokenizer
__lowerCamelCase : List[Any] = MBartaaTokenizerFast
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Optional[Any] = True
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Union[str, Any] =MBartaaTokenizer(snake_case__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] ='''<s>'''
UpperCAmelCase : Union[str, Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(snake_case__ ) , 1054 )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Tuple =MBartaaTokenizer(snake_case__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=snake_case__ )
UpperCAmelCase : int =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(snake_case__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
UpperCAmelCase : Union[str, Any] =tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase : Union[str, Any] =tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[str] ={'''input_ids''': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Optional[int] =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
UpperCAmelCase : str =self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
UpperCAmelCase : Optional[int] =tempfile.mkdtemp()
UpperCAmelCase : Tuple =tokenizer_r.save_pretrained(snake_case__ )
UpperCAmelCase : Tuple =tokenizer_p.save_pretrained(snake_case__ )
                # Checks it saves the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCAmelCase : Any =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] =tokenizer_r.from_pretrained(snake_case__ )
UpperCAmelCase : str =tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Dict =tempfile.mkdtemp()
UpperCAmelCase : Any =tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
UpperCAmelCase : int =tokenizer_p.save_pretrained(snake_case__ )
                # Checks it saves the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
UpperCAmelCase : str =tokenizer_r.from_pretrained(snake_case__ )
UpperCAmelCase : Optional[int] =tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : List[str] =tempfile.mkdtemp()
UpperCAmelCase : Any =tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
UpperCAmelCase : List[str] =tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Union[str, Any] =tokenizer_r.from_pretrained(snake_case__ )
UpperCAmelCase : Dict =tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
__lowerCamelCase : List[Any] = """facebook/mbart-large-50-one-to-many-mmt"""
__lowerCamelCase : str = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
__lowerCamelCase : int = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
__lowerCamelCase : str = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def UpperCAmelCase__ ( cls ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : MBartaaTokenizer =MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
UpperCAmelCase : List[Any] =1
return cls
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_0038 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        UpperCAmelCase : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        UpperCAmelCase : str =self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        UpperCAmelCase : Any =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , str )
        UpperCAmelCase : Optional[int] =10
        UpperCAmelCase : Optional[int] =self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(ids ) , desired_max_length )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_0053, 25_0001] )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =tempfile.mkdtemp()
UpperCAmelCase : int =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        UpperCAmelCase : Optional[Any] =MBartaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
        UpperCAmelCase : str =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
UpperCAmelCase : Dict =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
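        # shift_tokens_right rotates the labels so the trailing eos (2) becomes
        # the first decoder input token, hence decoder_input_ids starts [2, RO_CODE].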
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
        UpperCAmelCase : int =self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        UpperCAmelCase : Union[str, Any] =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
        UpperCAmelCase : str =self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''' )
        UpperCAmelCase : List[str] =self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''' )
        UpperCAmelCase : Any =targets['''input_ids''']
        UpperCAmelCase : str =shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_0004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
} , )
| 365 |
from ..utils import DummyObject, requires_backends
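# What follows is the dummy-object pattern used for optional backends: each
# placeholder class defers to requires_backends, which raises an informative
# ImportError only when the object is actually constructed or loaded without
# torch installed. A minimal sketch of the pattern (the class and attribute
# names here are illustrative, not taken from this file):
#
#   class SomeTorchOnlyModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])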
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : str = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : str = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> List[str]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Tuple:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> List[str]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Optional[int]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Union[str, Any]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> Optional[int]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__lowerCAmelCase , **__lowerCAmelCase )-> List[Any]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : int = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : int = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : str = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : int = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : int = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : str = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : int = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : str = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Any = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : str = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
| 78 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_SCREAMING_SNAKE_CASE = """bart"""
_SCREAMING_SNAKE_CASE = True
@st.cache(allow_output_mutation=UpperCamelCase_ )
def lowercase( ) -> List[Any]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
UpperCamelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
UpperCamelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
UpperCamelCase = qar_model.eval()
else:
UpperCamelCase , UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
UpperCamelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
UpperCamelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
UpperCamelCase = sas_model.eval()
else:
UpperCamelCase , UpperCamelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase_ )
def lowercase( ) -> Union[str, Any]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
UpperCamelCase = faiss.StandardGpuResources()
UpperCamelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
UpperCamelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
UpperCamelCase = faiss.IndexFlatIP(128 )
UpperCamelCase = faiss.index_cpu_to_gpu(UpperCamelCase_ , 1 , UpperCamelCase_ )
wikiaab_gpu_index_flat.add(UpperCamelCase_ ) # TODO fix for larger GPU
else:
UpperCamelCase , UpperCamelCase = (None, None)
UpperCamelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase_ )
def lowercase( ) -> str:
'''simple docstring'''
UpperCamelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
UpperCamelCase = elia["""train_eli5"""]
UpperCamelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
UpperCamelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(UpperCamelCase_ )
return (elia_train, eli5_train_q_index)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = load_indexes()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = load_models()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = load_train_data()
def lowercase( UpperCamelCase_ , UpperCamelCase_=10 ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = embed_questions_for_retrieval([question] , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase , UpperCamelCase = eli5_train_q_index.search(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = [elia_train[int(UpperCamelCase_ )] for i in I[0]]
return nn_examples
def lowercase( UpperCamelCase_ , UpperCamelCase_="wiki40b" , UpperCamelCase_="dense" , UpperCamelCase_=10 ) -> Dict:
'''simple docstring'''
if source == "none":
UpperCamelCase , UpperCamelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
UpperCamelCase , UpperCamelCase = query_qa_dense_index(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
UpperCamelCase , UpperCamelCase = query_es_index(
UpperCamelCase_ , UpperCamelCase_ , index_name="""english_wiki40b_snippets_100w""" , n_results=UpperCamelCase_ , )
UpperCamelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
UpperCamelCase = """question: {} context: {}""".format(UpperCamelCase_ , UpperCamelCase_ )
return question_doc, support_list
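# Illustrative shape of the first value returned above (passages hypothetical):
#   question_doc == "question: How do people make chocolate? context: <P> passage one <P> passage two"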
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase_ : None),
} )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=64 , UpperCamelCase_=256 , UpperCamelCase_=False , UpperCamelCase_=2 , UpperCamelCase_=0.9_5 , UpperCamelCase_=0.8 ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
UpperCamelCase = qa_sas_generate(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_answers=1 , num_beams=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ , do_sample=UpperCamelCase_ , temp=UpperCamelCase_ , top_p=UpperCamelCase_ , top_k=UpperCamelCase_ , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
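# The hash_funcs mapping above tells st.cache to treat torch tensors and the
# BART tokenizer as constants (their hash is None) instead of trying to hash
# them whenever this function is memoized.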
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_SCREAMING_SNAKE_CASE = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_SCREAMING_SNAKE_CASE = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_SCREAMING_SNAKE_CASE = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches, from the [Wiki40b](https://research.google/pubs/pub49029/) dataset (a pre-processed fixed snapshot of Wikipedia),
a set of passages relevant to the question.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
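# A minimal sketch of the pipeline described above, using the helpers defined
# earlier in this file (keyword names follow the calls further below and are
# assumptions, not a verified API):
#
#   question_doc, support_list = make_support(question, source="wiki40b", method="dense", n_results=10)
#   answer, _ = answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256)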
_SCREAMING_SNAKE_CASE = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Demo options""")
if demo_options:
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_SCREAMING_SNAKE_CASE = action_list.index(action_st)
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_SCREAMING_SNAKE_CASE = show_type == """Show full text of passages"""
else:
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_SCREAMING_SNAKE_CASE = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
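    # Concretely (illustrative, following the helpers defined above):
    #   dense  -> embed the question with the RetriBERT encoder, then
    #             wikiaab_gpu_index_flat.search(q_rep, n_results)  # max inner product
    #   sparse -> query_es_index(question, es_client, index_name="english_wiki40b_snippets_100w")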
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_SCREAMING_SNAKE_CASE = """wiki40b"""
_SCREAMING_SNAKE_CASE = """dense"""
_SCREAMING_SNAKE_CASE = """beam"""
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 6_4
_SCREAMING_SNAKE_CASE = 2_5_6
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Generation options""")
if generate_options:
_SCREAMING_SNAKE_CASE = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model to decode with
    **beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
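    # Roughly how these options reach generation further below (illustrative):
    #   beam    -> answer_question(..., sampling=False, n_beams=n_beams)
    #   sampled -> answer_question(..., sampling=True, top_p=top_p, temp=temp)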
_SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
_SCREAMING_SNAKE_CASE = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_SCREAMING_SNAKE_CASE = None
# start main text
_SCREAMING_SNAKE_CASE = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_SCREAMING_SNAKE_CASE = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_SCREAMING_SNAKE_CASE = st.text_input("""Enter your question here:""", """""")
else:
_SCREAMING_SNAKE_CASE = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
_SCREAMING_SNAKE_CASE = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_SCREAMING_SNAKE_CASE = support_list[:1_0]
_SCREAMING_SNAKE_CASE = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_SCREAMING_SNAKE_CASE = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_SCREAMING_SNAKE_CASE = res[1].strip()
if sec_titles == "":
_SCREAMING_SNAKE_CASE = """[{}]({})""".format(res[0], wiki_url)
else:
_SCREAMING_SNAKE_CASE = sec_titles.split(""" & """)
_SCREAMING_SNAKE_CASE = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_SCREAMING_SNAKE_CASE = find_nearest_training(question)
_SCREAMING_SNAKE_CASE = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_SCREAMING_SNAKE_CASE = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_SCREAMING_SNAKE_CASE = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 343 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = """xlnet"""
__lowerCAmelCase = ["""mems"""]
__lowerCAmelCase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , lowerCamelCase_ : Any=3_2000 , lowerCamelCase_ : Dict=1024 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : List[Any]=4096 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Union[str, Any]="bi" , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=1E-12 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Union[str, Any]=512 , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Union[str, Any]="last" , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : str="tanh" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Dict=5 , lowerCamelCase_ : str=5 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : int=2 , **lowerCamelCase_ : List[Any] , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = n_layer
UpperCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
UpperCamelCase = d_model // n_head
UpperCamelCase = ff_activation
UpperCamelCase = d_inner
UpperCamelCase = untie_r
UpperCamelCase = attn_type
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = dropout
UpperCamelCase = mem_len
UpperCamelCase = reuse_len
UpperCamelCase = bi_data
UpperCamelCase = clamp_len
UpperCamelCase = same_length
UpperCamelCase = summary_type
UpperCamelCase = summary_use_proj
UpperCamelCase = summary_activation
UpperCamelCase = summary_last_dropout
UpperCamelCase = start_n_top
UpperCamelCase = end_n_top
UpperCamelCase = bos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , lowerCamelCase_ , )
UpperCamelCase = kwargs["""use_cache"""]
UpperCamelCase = use_mems_eval
UpperCamelCase = use_mems_train
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
@property
    def max_position_embeddings( self : Dict ):  # name must match the setter decorator below
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self : Optional[int] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 343 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
UpperCAmelCase_ = (720, 1280) # Height, Width
UpperCAmelCase_ = (0.4, 0.6)
UpperCAmelCase_ = 1 / 100  # drop a bounding box if its height or width is below this scale
UpperCAmelCase_ = ''
UpperCAmelCase_ = ''
UpperCAmelCase_ = ''
UpperCAmelCase_ = 250
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase__ ,UpperCamelCase__ : int = get_dataset(__UpperCAmelCase , __UpperCAmelCase )
for index in range(__UpperCAmelCase ):
UpperCamelCase__ : List[str] = random.sample(range(len(__UpperCAmelCase ) ) , 4 )
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = update_image_and_anno(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , filter_scale=__UpperCAmelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCamelCase__ : List[Any] = random_chars(32 )
UpperCamelCase__ : Dict = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
UpperCamelCase__ : Any = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(f"{file_root}.jpg" , __UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
UpperCamelCase__ : Any = []
for anno in new_annos:
UpperCamelCase__ : Tuple = anno[3] - anno[1]
UpperCamelCase__ : List[str] = anno[4] - anno[2]
UpperCamelCase__ : List[str] = anno[1] + width / 2
UpperCamelCase__ : Tuple = anno[2] + height / 2
UpperCamelCase__ : Tuple = f"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(__UpperCAmelCase )
with open(f"{file_root}.txt" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
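# Worked example of the YOLO-format conversion in main() above (hypothetical
# box): corners (xmin=0.2, ymin=0.3, xmax=0.6, ymax=0.7) give width=0.4,
# height=0.4, x_center=0.4, y_center=0.5, written as "cls 0.4 0.5 0.4 0.4".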
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: str ) -> tuple[list, list]:
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[int] = []
for label_file in glob.glob(os.path.join(__UpperCAmelCase , '''*.txt''' ) ):
UpperCamelCase__ : Dict = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__UpperCAmelCase ) as in_file:
UpperCamelCase__ : Dict = in_file.readlines()
UpperCamelCase__ : str = os.path.join(__UpperCAmelCase , f"{label_name}.jpg" )
UpperCamelCase__ : int = []
for obj_list in obj_lists:
UpperCamelCase__ : Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
UpperCamelCase__ : Tuple = float(obj[1] ) - float(obj[3] ) / 2
UpperCamelCase__ : int = float(obj[2] ) - float(obj[4] ) / 2
UpperCamelCase__ : List[Any] = float(obj[1] ) + float(obj[3] ) / 2
UpperCamelCase__ : Tuple = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__UpperCAmelCase )
labels.append(__UpperCAmelCase )
return img_paths, labels
def lowerCAmelCase_ ( __UpperCAmelCase: list , __UpperCAmelCase: list , __UpperCAmelCase: list[int] , __UpperCAmelCase: tuple[int, int] , __UpperCAmelCase: tuple[float, float] , __UpperCAmelCase: float = 0.0 , ) -> tuple[list, list, str]:
UpperCamelCase__ : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
UpperCamelCase__ : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCamelCase__ : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCamelCase__ : Optional[Any] = int(scale_x * output_size[1] )
UpperCamelCase__ : Union[str, Any] = int(scale_y * output_size[0] )
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Any = []
for i, index in enumerate(__UpperCAmelCase ):
UpperCamelCase__ : int = all_img_list[index]
path_list.append(__UpperCAmelCase )
UpperCamelCase__ : List[Any] = all_annos[index]
UpperCamelCase__ : Dict = cva.imread(__UpperCAmelCase )
if i == 0: # top-left
UpperCamelCase__ : Optional[Any] = cva.resize(__UpperCAmelCase , (divid_point_x, divid_point_y) )
UpperCamelCase__ : List[Any] = img
for bbox in img_annos:
UpperCamelCase__ : str = bbox[1] * scale_x
UpperCamelCase__ : Tuple = bbox[2] * scale_y
UpperCamelCase__ : Dict = bbox[3] * scale_x
UpperCamelCase__ : Optional[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
UpperCamelCase__ : str = cva.resize(__UpperCAmelCase , (output_size[1] - divid_point_x, divid_point_y) )
UpperCamelCase__ : Optional[Any] = img
for bbox in img_annos:
UpperCamelCase__ : Tuple = scale_x + bbox[1] * (1 - scale_x)
UpperCamelCase__ : List[str] = bbox[2] * scale_y
UpperCamelCase__ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
UpperCamelCase__ : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
UpperCamelCase__ : List[Any] = cva.resize(__UpperCAmelCase , (divid_point_x, output_size[0] - divid_point_y) )
UpperCamelCase__ : List[str] = img
for bbox in img_annos:
UpperCamelCase__ : Dict = bbox[1] * scale_x
UpperCamelCase__ : List[Any] = scale_y + bbox[2] * (1 - scale_y)
UpperCamelCase__ : int = bbox[3] * scale_x
UpperCamelCase__ : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
UpperCamelCase__ : List[Any] = cva.resize(
__UpperCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
UpperCamelCase__ : Optional[Any] = img
for bbox in img_annos:
UpperCamelCase__ : Dict = scale_x + bbox[1] * (1 - scale_x)
UpperCamelCase__ : int = scale_y + bbox[2] * (1 - scale_y)
UpperCamelCase__ : Dict = scale_x + bbox[3] * (1 - scale_x)
UpperCamelCase__ : Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
UpperCamelCase__ : int = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> str:
assert number_char > 1, "The number of characters should be greater than 1"
UpperCamelCase__ : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 247 |
import random
def lowerCAmelCase_ ( __UpperCAmelCase: list , __UpperCAmelCase: Optional[Any] ) -> tuple:
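# Three-way partition: split the data into elements less than, equal to,
# and greater than the pivot.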
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : int = [], [], []
for element in data:
if element < pivot:
less.append(__UpperCAmelCase )
elif element > pivot:
greater.append(__UpperCAmelCase )
else:
equal.append(__UpperCAmelCase )
return less, equal, greater
def lowerCAmelCase_ ( __UpperCAmelCase: list , __UpperCAmelCase: int ) -> List[str]:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(__UpperCAmelCase ) or index < 0:
return None
UpperCamelCase__ : Tuple = items[random.randint(0 , len(__UpperCAmelCase ) - 1 )]
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = _partition(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : Union[str, Any] = len(__UpperCAmelCase )
UpperCamelCase__ : List[str] = len(__UpperCAmelCase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__UpperCAmelCase , __UpperCAmelCase )
# must be in larger
else:
return quick_select(__UpperCAmelCase , index - (m + count) )
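# A minimal usage sketch (hypothetical values; assumes the recursive calls above
# receive the intended (sub-list, adjusted index) arguments):
# quick_select([2, 4, 5, 7, 899, 54, 32], 5) -> 54, the element that would sit at
# index 5 once sorted; the median is quick_select(items, len(items) // 2).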
| 247 | 1 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
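# Convert a T5X (JAX) checkpoint into a Flax transformers checkpoint by copying every
# weight block (attention, MLP, layer norms, relative position bias, embeddings) layer by layer.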
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
UpperCamelCase = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCamelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=__UpperCamelCase )
UpperCamelCase = checkpoints.load_tax_checkpoint(__UpperCamelCase )
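# T5 v1.1-style checkpoints use a gated MLP whose input projection is split into wi_0/wi_1.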
UpperCamelCase = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
UpperCamelCase = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCamelCase = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global'].""" )
# Encoder
for layer_index in range(config.num_layers ):
UpperCamelCase = F"layers_{str(__UpperCamelCase )}"
# Self-Attention
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
UpperCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
UpperCamelCase = flax_model.params["""encoder"""]["""block"""][str(__UpperCamelCase )]["""layer"""]
UpperCamelCase = tax_attention_key
UpperCamelCase = tax_attention_out
UpperCamelCase = tax_attention_query
UpperCamelCase = tax_attention_value
UpperCamelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase = tax_mlp_wi_a
UpperCamelCase = tax_mlp_wi_a
else:
UpperCamelCase = tax_mlp_wi
UpperCamelCase = tax_mlp_wo
UpperCamelCase = tax_mlp_layer_norm
UpperCamelCase = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
UpperCamelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
UpperCamelCase = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
UpperCamelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
UpperCamelCase = F"layers_{str(__UpperCamelCase )}"
# Self-Attention
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
UpperCamelCase = tax_enc_dec_attention_module["""key"""]["""kernel"""]
UpperCamelCase = tax_enc_dec_attention_module["""out"""]["""kernel"""]
UpperCamelCase = tax_enc_dec_attention_module["""query"""]["""kernel"""]
UpperCamelCase = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
UpperCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
UpperCamelCase = flax_model.params["""decoder"""]["""block"""][str(__UpperCamelCase )]["""layer"""]
UpperCamelCase = tax_attention_key
UpperCamelCase = tax_attention_out
UpperCamelCase = tax_attention_query
UpperCamelCase = tax_attention_value
UpperCamelCase = tax_pre_attention_layer_norm
UpperCamelCase = tax_enc_dec_attention_key
UpperCamelCase = tax_enc_dec_attention_out
UpperCamelCase = tax_enc_dec_attention_query
UpperCamelCase = tax_enc_dec_attention_value
UpperCamelCase = tax_cross_layer_norm
if split_mlp_wi:
UpperCamelCase = tax_mlp_wi_a
UpperCamelCase = tax_mlp_wi_a
else:
UpperCamelCase = tax_mlp_wi
UpperCamelCase = tax_mlp_wo
UpperCamelCase = txa_mlp_layer_norm
UpperCamelCase = flax_model_decoder_layer_block
# Decoder Normalization
UpperCamelCase = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
UpperCamelCase = txa_decoder_norm
# Only for layer 0:
UpperCamelCase = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
UpperCamelCase = tax_decoder_rel_embedding
# Token Embeddings
UpperCamelCase = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
UpperCamelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCamelCase = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(__UpperCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 321 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class a_ :
lowercase = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
lowercase = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = {}
if self.train_dir is not None:
UpperCamelCase = self.train_dir
if self.validation_dir is not None:
UpperCamelCase = self.validation_dir
UpperCamelCase = data_files if data_files else None
@dataclass
class a_ :
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowercase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowercase = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class a_ ( lowerCamelCase ):
lowercase = field(
default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def lowercase__ ( __UpperCamelCase )-> int:
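# Data collator: stack the per-example image tensors into one batch. MAE is
# self-supervised, so no labels are collated; the model reconstructs masked patches.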
UpperCamelCase = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowercase__ ( )-> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __UpperCamelCase ) and data_args.train_val_split > 0.0:
UpperCamelCase = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase = split["""train"""]
UpperCamelCase = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
UpperCamelCase = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
UpperCamelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase = ViTMAEForPreTraining(__UpperCamelCase )
if training_args.do_train:
UpperCamelCase = ds["""train"""].column_names
else:
UpperCamelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase = """image"""
elif "img" in column_names:
UpperCamelCase = """img"""
else:
UpperCamelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase = Compose(
[
Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCamelCase ):
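# Batch transform, applied lazily via set_transform below: augment every image in the batch.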
UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__UpperCamelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__UpperCamelCase )
# Compute absolute learning rate
UpperCamelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
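# Linear LR scaling rule (as in the MAE paper): absolute_lr = base_lr * total_batch_size / 256.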
UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase = last_checkpoint
UpperCamelCase = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , __UpperCamelCase )
trainer.save_metrics("""eval""" , __UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCamelCase )
else:
trainer.create_model_card(**__UpperCamelCase )
def lowercase__ ( __UpperCamelCase )-> List[str]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 321 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
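# Regression check: run each google/CompVis diffusion UNet on fixed-seed noise and
# compare the first 30 output logits against the reference slices hard-coded below.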
__lowerCamelCase : List[Any] = HfApi()
__lowerCamelCase : int = {}
# fmt: off
__lowerCamelCase : Optional[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
__lowerCamelCase : List[str] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
__lowerCamelCase : Union[str, Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
__lowerCamelCase : Optional[int] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
__lowerCamelCase : Optional[int] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
__lowerCamelCase : Any = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
__lowerCamelCase : Dict = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
__lowerCamelCase : Union[str, Any] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
__lowerCamelCase : int = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
__lowerCamelCase : Tuple = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
__lowerCamelCase : Any = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
__lowerCamelCase : Dict = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
__lowerCamelCase : List[Any] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
__lowerCamelCase : int = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
__lowerCamelCase : Any = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
__lowerCamelCase : str = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__lowerCamelCase : Optional[int] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
__lowerCamelCase : Any = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__lowerCamelCase : List[str] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__lowerCamelCase : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 359 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
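# Convert a Stable Diffusion VAE checkpoint (.pt or .safetensors) into a diffusers
# AutoencoderKL by renaming the encoder/decoder/mid-block keys one group at a time.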
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = checkpoint
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_in.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_in.bias"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_out.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_out.bias"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.norm_out.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.norm_out.bias"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_in.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_in.bias"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_out.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_out.bias"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.norm_out.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.norm_out.bias"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""quant_conv.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""quant_conv.bias"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""post_quant_conv.weight"""]
SCREAMING_SNAKE_CASE__ = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
SCREAMING_SNAKE_CASE__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
SCREAMING_SNAKE_CASE__ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
SCREAMING_SNAKE_CASE__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
SCREAMING_SNAKE_CASE__ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__UpperCamelCase )
}
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
SCREAMING_SNAKE_CASE__ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
SCREAMING_SNAKE_CASE__ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """encoder.mid.block""" in key]
SCREAMING_SNAKE_CASE__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
SCREAMING_SNAKE_CASE__ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
SCREAMING_SNAKE_CASE__ = renew_vae_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
conv_attn_to_linear(__UpperCamelCase )
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = num_up_blocks - 1 - i
SCREAMING_SNAKE_CASE__ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
SCREAMING_SNAKE_CASE__ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
SCREAMING_SNAKE_CASE__ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """decoder.mid.block""" in key]
SCREAMING_SNAKE_CASE__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
SCREAMING_SNAKE_CASE__ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
SCREAMING_SNAKE_CASE__ = renew_vae_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
conv_attn_to_linear(__UpperCamelCase )
return new_checkpoint
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = requests.get(
""" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
SCREAMING_SNAKE_CASE__ = io.BytesIO(r.content )
SCREAMING_SNAKE_CASE__ = OmegaConf.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = 5_12
SCREAMING_SNAKE_CASE__ = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
SCREAMING_SNAKE_CASE__ = {}
with safe_open(__UpperCamelCase , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
SCREAMING_SNAKE_CASE__ = f.get_tensor(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )["""state_dict"""]
# Convert the VAE model.
SCREAMING_SNAKE_CASE__ = create_vae_diffusers_config(__UpperCamelCase , image_size=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = custom_convert_ldm_vae_checkpoint(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(**__UpperCamelCase )
vae.load_state_dict(__UpperCamelCase )
vae.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to save the converted diffusers VAE to.''')
__lowerCamelCase : Optional[int] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 204 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
a__ : List[Any] =logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple ="vision-encoder-decoder"
SCREAMING_SNAKE_CASE_ : Tuple =True
def __init__( self : List[str] , **__A : List[str] ):
super().__init__(**__A )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
__UpperCamelCase = kwargs.pop('encoder' )
__UpperCamelCase = encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('decoder' )
__UpperCamelCase = decoder_config.pop('model_type' )
__UpperCamelCase = AutoConfig.for_model(__A , **__A )
__UpperCamelCase = AutoConfig.for_model(__A , **__A )
__UpperCamelCase = True
@classmethod
def _lowerCamelCase ( cls : Any , __A : PretrainedConfig , __A : PretrainedConfig , **__A : Union[str, Any] ):
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
__UpperCamelCase = True
__UpperCamelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.encoder.to_dict()
__UpperCamelCase = self.decoder.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =version.parse("1.11" )
@property
def _lowerCamelCase ( self : str ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowerCamelCase ( self : Dict ):
return 1e-4
@property
def _lowerCamelCase ( self : List[str] ):
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
@property
def _lowerCamelCase ( self : int ):
__UpperCamelCase = OrderedDict()
__UpperCamelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__UpperCamelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__UpperCamelCase = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def _lowerCamelCase ( self : Union[str, Any] , __A : "PreTrainedTokenizerBase" , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional["TensorType"] = None , ):
import torch
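# Reuse the text-side dummy inputs for the decoder and stand in a zero tensor of the
# encoder's hidden size for encoder_hidden_states.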
__UpperCamelCase = OrderedDict()
__UpperCamelCase = super().generate_dummy_inputs(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
__UpperCamelCase , __UpperCamelCase = dummy_input['input_ids'].shape
__UpperCamelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
__UpperCamelCase = dummy_input.pop('input_ids' )
__UpperCamelCase = dummy_input.pop('attention_mask' )
__UpperCamelCase = torch.zeros(__A )
return common_inputs
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
@property
def _lowerCamelCase ( self : str ):
pass
def _lowerCamelCase ( self : Optional[int] , __A : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__A )
def _lowerCamelCase ( self : Union[str, Any] , __A : PretrainedConfig , __A : PretrainedConfig , __A : str = "default" ):
__UpperCamelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__A , __A )
| 53 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (A ) -> int:
"""simple docstring"""
if not isinstance(A , A ):
raise TypeError('''only integers accepted as input''' )
else:
lowercase__ = str(abs(A ) )
lowercase__ = [list(A ) for char in range(len(A ) )]
for index in range(len(A ) ):
num_transpositions[index].pop(A )
return max(
int(''''''.join(list(A ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
| 2 | 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
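# For each selected tokenizer class and checkpoint, load the slow tokenizer, convert
# it, and keep only the resulting tokenizer.json fast-tokenizer file in dump_path.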
def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : str , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
lowerCamelCase_ = TOKENIZER_CLASSES
else:
lowerCamelCase_ = {tokenizer_name: getattr(lowercase , tokenizer_name + 'Fast' )}
logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
lowerCamelCase_ = TOKENIZER_CLASSES[tokenizer_name]
lowerCamelCase_ = True
if checkpoint_name is None:
lowerCamelCase_ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCamelCase_ = [checkpoint_name]
logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
lowerCamelCase_ = tokenizer_class.from_pretrained(lowercase , force_download=lowercase )
# Save fast tokenizer
logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCamelCase_ , lowerCamelCase_ = checkpoint.split('/' )
lowerCamelCase_ = os.path.join(lowercase , lowercase )
elif add_prefix:
lowerCamelCase_ = checkpoint
lowerCamelCase_ = dump_path
else:
lowerCamelCase_ = None
lowerCamelCase_ = dump_path
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCamelCase_ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCamelCase_ = file_path.split(lowercase )[-1][0]
if next_char == "/":
lowerCamelCase_ = os.path.join(lowercase , lowercase )
lowerCamelCase_ = None
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
lowerCamelCase_ = tokenizer.save_pretrained(
lowercase , legacy_format=lowercase , filename_prefix=lowercase )
logger.info(f"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowercase )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 208 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
lowerCamelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
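# End-to-end smoke tests: finetune facebook/rag-sequence-base on a tiny synthetic
# dataset (12 train / 2 val / 2 test lines) and assert a minimal exact-match score.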
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : List[str] , A_ : Optional[Any] ) -> int:
"""simple docstring"""
os.makedirs(A_ , exist_ok=A_ )
lowerCamelCase_ = {'source': 'What is love ?', 'target': 'life'}
lowerCamelCase_ = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCamelCase_ = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(A_ , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(A_ )
def a__ ( self : Optional[Any] , A_ : int , A_ : str = "pytorch" ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = os.path.join(A_ , 'output' )
lowerCamelCase_ = os.path.join(A_ , 'data' )
self._create_dummy_data(data_dir=A_ )
lowerCamelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
lowerCamelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(A_ , env=self.get_env() )
lowerCamelCase_ = os.path.join(A_ , 'metrics.json' )
with open(A_ ) as f:
lowerCamelCase_ = json.load(A_ )
return result
@require_torch_gpu
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 208 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
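# Convert an original DETA checkpoint (Swin-large backbone) into the transformers
# DetaForObjectDetection format by renaming keys and splitting fused qkv projections.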
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = SwinConfig(
embed_dim=1_92 ,depths=(2, 2, 18, 2) ,num_heads=(6, 12, 24, 48) ,window_size=12 ,out_features=["""stage2""", """stage3""", """stage4"""] ,)
_SCREAMING_SNAKE_CASE = DetaConfig(
backbone_config=a_ ,num_queries=9_00 ,encoder_ffn_dim=20_48 ,decoder_ffn_dim=20_48 ,num_feature_levels=5 ,assign_first_stage=a_ ,with_box_refine=a_ ,two_stage=a_ ,)
# set labels
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
if "o365" in model_name:
_SCREAMING_SNAKE_CASE = 3_66
_SCREAMING_SNAKE_CASE = """object365-id2label.json"""
else:
_SCREAMING_SNAKE_CASE = 91
_SCREAMING_SNAKE_CASE = """coco-detection-id2label.json"""
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = json.load(open(cached_download(hf_hub_url(a_ ,a_ ,repo_type="""dataset""" ) ) ,"""r""" ) )
_SCREAMING_SNAKE_CASE = {int(a_ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Rename a key in the state dict in place, keeping its value."""
    val = dct.pop(old)
    dct[new] = val
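# Illustration (added; the dict below is a made-up toy, not a real checkpoint):
# rename_key moves a tensor to a new key without touching its value.
def _demo_rename_key():
    demo = {"transformer.encoder.layers.0.linear1.weight": "tensor"}
    rename_key(demo, "transformer.encoder.layers.0.linear1.weight", "model.encoder.layers.0.fc1.weight")
    assert "model.encoder.layers.0.fc1.weight" in demo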
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split the fused Swin qkv projections into separate query/key/value weights."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
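# Toy sketch (added for illustration; the shape is made up): how a fused qkv matrix of
# shape (3 * dim, dim) is sliced into query/key/value blocks, mirroring the loop above.
def _demo_split_qkv(dim=4):
    import torch

    qkv = torch.randn(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)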
def read_in_decoder_q_k_v(state_dict, config):
    """Split the decoder self-attention in_proj weights into q/k/v projections."""
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original DETA checkpoint weights into the HF DETA structure."""
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print the original state dict for inspection
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor(
            [[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]]
        )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor(
            [[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]]
        )
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 306 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
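# Sketch (added for illustration; the cumulative-sum "model" below is a toy stand-in, not
# GPT-NeoX): the property create_and_check_decoder_model_past_large_inputs verifies is that
# decoding new tokens against a cache must reproduce the tail of a full, cache-free forward pass.
def _demo_cache_equivalence():
    import torch

    x = torch.randn(2, 7, 4)
    full = torch.cumsum(x, dim=1)  # causal toy op: each position sees its whole prefix
    past_summary = x[:, :-3].sum(dim=1, keepdim=True)  # plays the role of past_key_values
    with_cache = past_summary + torch.cumsum(x[:, -3:], dim=1)
    assert torch.allclose(full[:, -3:], with_cache, atol=1e-5)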
| 178 | 0 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # the last batch's noisy inputs and predictions must match across schedulers
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
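# Why the equality above is expected (standalone illustrative check; the scheduler
# settings mirror the test, everything else here is a toy): with matching beta schedules,
# DDPM and DDIM share the same closed-form forward (noising) process.
def _demo_shared_forward_process():
    ddpm = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    ddim = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    x = torch.randn(1, 3, 8, 8)
    eps = torch.randn_like(x)
    t = torch.tensor([10])
    assert torch.allclose(ddpm.add_noise(x, eps, t), ddim.add_noise(x, eps, t), atol=1e-6)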
| 335 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # note: this tester deliberately hard-codes its values instead of using the arguments
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
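# Added note (illustration; relies only on public ConvBertConfig defaults): the attention
# shape checks in the tests above use num_attention_heads / 2 because ConvBERT routes part
# of its heads, controlled by head_ratio (default 2), to a span-based convolution branch,
# leaving only num_attention_heads / head_ratio conventional self-attention maps.
def _demo_head_ratio():
    from transformers import ConvBertConfig

    cfg = ConvBertConfig(num_attention_heads=4)
    assert cfg.head_ratio == 2
    assert cfg.num_attention_heads // cfg.head_ratio == 2  # attention maps actually produced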
| 335 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
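# Why a helper like list_field is needed (illustration): dataclasses forbid mutable
# defaults, so list-valued fields must go through default_factory instead.
@dataclass
class _ListFieldDemo:
    chars: List[str] = list_field(default=["-", "?"])  # a plain `= ["-", "?"]` would raise ValueError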
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the audio inputs and the CTC labels it receives."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step, scaling the CTC loss correctly for multi-GPU and mixed-precision setups."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
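# Tiny illustration (made-up tensors) of the -100 convention used by the collator above:
# positions masked out of the attention mask become -100 so the loss ignores them.
def _demo_ignore_index():
    labels = torch.tensor([[5, 6, 0, 0]])
    attention_mask = torch.tensor([[1, 1, 0, 0]])
    labels = labels.masked_fill(attention_mask.ne(1), -100)
    assert labels.tolist() == [[5, 6, -100, -100]]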
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
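# Example invocation (illustrative only -- the script file name, model, language, and
# paths below are assumptions, not taken from this script):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-large-xlsr-turkish-demo \
#       --overwrite_output_dir \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --fp16
#
# All flags map to the dataclasses above or to transformers.TrainingArguments.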
| 95 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
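# Quick sanity sketch (added; the input key is a hand-written example assumed to follow
# the ml-cvnets naming, not taken from a real checkpoint):
def _demo_rename():
    assert rename_key("conv_1.block.conv.weight") == "mobilevit.conv_stem.convolution.weight"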
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original MobileViT checkpoint weights into the HF MobileViT structure."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
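# Illustrative invocation of the conversion script above (the file paths are
# placeholders, not taken from the original source):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./weights/mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small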
| 296 | 0 |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
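# A minimal usage sketch (the checkpoint name and image path are illustrative):
#
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")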
| 360 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
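# Illustrative invocation (the checkpoint path and config name are placeholders):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_model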
| 333 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( lowercase__ ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase__ ):
return ext
raise Exception(
f'''Unable to determine file format from file extension {path}. '''
f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_lowerCamelCase : Optional[int] = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
_lowerCamelCase : List[Any] = PipelineDataFormat.from_str(
format=lowercase__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowercase__ , lowercase__ )
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase ):
_lowerCamelCase : List[Any] = nlp
_lowerCamelCase : Optional[Any] = reader
@staticmethod
def A_ ( lowercase ):
_lowerCamelCase : Optional[int] = parser.add_parser('run' , help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
run_parser.add_argument('--input' , type=lowercase , help='Path to the file to use for inference' )
run_parser.add_argument('--output' , type=lowercase , help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' , type=lowercase , help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' , type=lowercase , help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' , type=lowercase , help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' , type=lowercase , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=lowercase , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=lowercase , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
run_parser.set_defaults(func=lowercase )
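    # Example CLI usage built from the arguments registered above (the task
    # name, file names, and column are illustrative, assuming a CSV input):
    #
    #   transformers-cli run --task sentiment-analysis --input reviews.csv --format csv --column text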
    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 96 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 340 | 0 |
import torch
import torch.nn as nn

from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
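# A minimal instantiation sketch; the hyperparameter values below are
# illustrative assumptions, not taken from the original source:
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )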
| 102 |
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the one unknown concentration from the other two, using the
    mass-action law n * p = n_i**2. Exactly one argument must be zero (the
    unknown one).
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
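# Quick sanity check of the mass-action law n * p = n_i**2 used above (the
# concentrations are illustrative):
#
#   >>> carrier_concentration(electron_conc=0, hole_conc=1e16, intrinsic_conc=1.5e10)
#   ('electron_conc', 22500.0)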
| 102 | 1 |
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 32 |
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features_order(jsonl_312_path, tmp_path):
    # the requested features are in a different order than the columns in the file
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
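# A minimal writer sketch using the same API the tests below exercise (the
# in-memory dataset is illustrative, not part of the original tests):
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   with io.BytesIO() as buffer:
#       JsonDatasetWriter(ds, buffer, lines=True).write()
#       buffer.seek(0)
#       rows = load_json_lines(buffer)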
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 97 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 354 |
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
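# Standalone usage mirroring the slow test above (the model name comes from the
# test; the image path is illustrative):
#
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier(Image.open("cats.png"), candidate_labels=["cat", "plane", "remote"])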
| 169 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__a = logging.getLogger(__name__)
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
return (preds == labels).mean()
@dataclass
class lowerCamelCase :
'''simple docstring'''
_A : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_A : Optional[str] = field(
default=_lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_A : Optional[str] = field(
default=_lowerCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_A : Optional[str] = field(
default=_lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCamelCase :
'''simple docstring'''
_A : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_A : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
_A : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_A : bool = field(
default=_lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def A_ ( ):
'''simple docstring'''
snake_case_ :List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_, snake_case_, snake_case_ :List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", _lowercase )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 66 |
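A quick sanity check of the simple_accuracy helper defined in the script above: it is nothing more than the mean of element-wise equality between predictions and labels.

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print((preds == labels).mean())  # 0.75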
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 63 | 0 |
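Example run for harmonic_series above (assumes the function from the previous block is in scope):

print(harmonic_series("5"))  # ['1', '1/2', '1/3', '1/4', '1/5']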
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # No `preprocess` iterator has been started yet, so start one.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 6 |
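A self-contained sketch of the batch-unrolling idea behind PipelineIterator above: inference runs on whole DataLoader batches, while iteration hands back one batch_size=1 item at a time. The doubling infer function is an illustrative stand-in for a model forward pass.

import torch
from torch.utils.data import DataLoader

data = torch.arange(10).float().unsqueeze(1)
loader = DataLoader(data, batch_size=4)

def infer(batch):
    return batch * 2  # stand-in for a model forward pass

for batch in loader:
    out = infer(batch)
    for i in range(out.shape[0]):  # unroll the batch, like loader_batch_item does
        item = out[i].unsqueeze(0)  # keep a batch_size=1 shape
        print(item)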
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 6 | 1 |
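The acceptance rule used above, in isolation: an improving neighbor is always taken, while a worsening one is taken with probability e^(change / T), which shrinks as the temperature cools.

import math
import random

def accept(change: float, temperature: float) -> bool:
    if change > 0:
        return True
    return random.random() < math.e ** (change / temperature)

print(accept(-1.0, 100.0))  # usually True: hot, so worse moves are often accepted
print(accept(-1.0, 0.01))   # almost surely False: cold, so worse moves are rejected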
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 104 |
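Worked example for solve above: "2 3 + 4 *" is (2 + 3) * 4, so the evaluator prints its table of stack operations and returns 20.

print(solve("2 3 + 4 *".split(" ")))  # 20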
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the tester pins its own values below rather than using the arguments above.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        dict_inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        list_inputs = [input_ids, input_mask]
        result = model(dict_inputs)
        result = model(list_inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 325 | 0 |
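A note on the attention-shape assertions in the tests above: ConvBERT splits its heads between self-attention and a convolution branch (head_ratio is 2 in the tester), which is why the expected shapes use num_attention_heads / 2. A quick check with the tester defaults:

num_attention_heads, head_ratio, seq_length = 4, 2, 7
print([num_attention_heads / head_ratio, seq_length, seq_length])  # [2.0, 7, 7]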
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 253 |
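Worked check for the digit-sum-of-factorial solution above, written out as a self-contained line: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.

from math import factorial

print(sum(int(x) for x in str(factorial(10))))  # 27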
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 253 | 1 |
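A quick check of the attribute_map above: num_attention_heads and hidden_size resolve to the encoder attention heads and d_model defaults (this assumes a transformers install that ships this config class):

from transformers import TableTransformerConfig

config = TableTransformerConfig()
print(config.num_attention_heads, config.hidden_size)  # 8 256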
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 48 |
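Hypothetical usage of the unconditional latent-diffusion pipeline above; "CompVis/ldm-celebahq-256" is a checkpoint of this kind, but treat the exact id and the step count as illustrative assumptions:

import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = pipe(num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("ldm_sample.png")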
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] =ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = (3, 32, 128)
UpperCamelCase :Any = tempfile.mkdtemp()
# fmt: off
UpperCamelCase :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCamelCase :Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
UpperCamelCase :Tuple = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
UpperCamelCase :str = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> int:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
UpperCamelCase :List[Any] = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) )
return image_input
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[int] = self.get_tokenizer()
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase :Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase :int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Tuple = self.get_image_processor()
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = self.prepare_image_inputs()
UpperCamelCase :List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
UpperCamelCase :Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = self.get_tokenizer()
UpperCamelCase :int = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = '''test'''
UpperCamelCase :Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = '''test'''
UpperCamelCase :str = self.prepare_image_inputs()
UpperCamelCase :Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Any = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :Union[str, Any] = processor.char_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :Optional[Any] = self.get_tokenizer()
UpperCamelCase :Any = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = None
UpperCamelCase :List[Any] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :str = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Optional[int] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.randn(1 , 27 , 38 )
UpperCamelCase :Union[str, Any] = torch.randn(1 , 27 , 5_0257 )
UpperCamelCase :Optional[Any] = torch.randn(1 , 27 , 3_0522 )
UpperCamelCase :Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 259 | 0 |
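The save/load round-trip the tests above exercise, in miniature: a composite processor persists each sub-component's config into one directory and rebuilds both pieces from it. The ToyProcessor here is an illustrative stand-in, not the transformers API.

import json
import os
import tempfile

class ToyProcessor:
    def __init__(self, tokenizer_cfg: dict, image_cfg: dict):
        self.tokenizer_cfg = tokenizer_cfg
        self.image_cfg = image_cfg

    def save_pretrained(self, path: str) -> None:
        os.makedirs(path, exist_ok=True)
        for name, cfg in [("tokenizer.json", self.tokenizer_cfg), ("image.json", self.image_cfg)]:
            with open(os.path.join(path, name), "w") as f:
                json.dump(cfg, f)

    @classmethod
    def from_pretrained(cls, path: str) -> "ToyProcessor":
        cfgs = []
        for name in ("tokenizer.json", "image.json"):
            with open(os.path.join(path, name)) as f:
                cfgs.append(json.load(f))
        return cls(*cfgs)

with tempfile.TemporaryDirectory() as tmp:
    proc = ToyProcessor({"bos_token": "[GO]"}, {"size": {"height": 32, "width": 128}})
    proc.save_pretrained(tmp)
    print(ToyProcessor.from_pretrained(tmp).image_cfg)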
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase__ = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 359 |
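The device-selection logic above in miniature: try to resolve a TPU, else fall back to a one-device strategy (CPU here). A sketch assuming TensorFlow is installed; no TPU or GPU is needed for the CPU branch to run.

import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None

if tpu is not None:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.TPUStrategy(tpu)
else:
    strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
print(strategy)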
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 290 | 0 |
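Round-trip sketch for the HillCipher class above (assumes the class from the previous block is in scope): the 2x2 key has determinant 2*6 - 5*1 = 7, which is coprime with 36, so it passes check_determinant; decryption undoes encryption up to the padding that process_text applies.

import numpy

cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
encrypted = cipher.encrypt("testmessage")
print(encrypted)
print(cipher.decrypt(encrypted))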
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway head: pool one intermediate layer's output and classify it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n"
    "also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
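# --- Illustration (added, not part of DeeBERT itself): the entropy-threshold
# early-exit rule above, reduced to a standalone helper; the names here are ours.
def pick_exit_layer(per_layer_logits, thresholds):
    """Return (logits, 1-based layer index) for the first confident-enough layer."""
    for i, logits in enumerate(per_layer_logits):
        if entropy(logits).mean().item() < thresholds[i]:
            return logits, i + 1
    return per_layer_logits[-1], len(per_layer_logits)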
| 161 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
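# Note (added): with this lazy-module pattern, importing the package only builds
# the mapping above; the torch-backed submodule is materialized on first
# attribute access, e.g. `from transformers.models.clap import ClapProcessor`.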
| 161 | 1 |
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """Return True if the digits of n are neither fully increasing nor fully decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches percent."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}") | 360 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
)
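# --- Usage sketch (added): all paths below are placeholders, and the script
# filename is an assumption based on the transformers repo layout.
#
#     python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#         --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --gin_file /path/to/model.gin \
#         --pytorch_dump_folder_path ./switch-base-8 \
#         --num_experts 8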
| 195 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict: DatasetInfosDict, tmp_path):
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md")) | 269 |
"""simple docstring"""
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip()))) | 269 | 1 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 362 |
'''simple docstring'''
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
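# --- Quick check (added): the DP table above on a small instance.
#
#     assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)        # 4 + 5 = 9
#     assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)   # no subset sums to 30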
| 101 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrixa: list, matrixb: list) -> list:
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)

    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]

    maximum = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)

    final_matrix = actual_strassen(new_matrixa, new_matrixb)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
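# Note (added): Strassen replaces the 8 recursive block products of the naive
# divide-and-conquer scheme with 7, giving O(n^log2(7)) ~ O(n^2.81) instead of
# O(n^3). A cross-check against numpy (sketch; assumes numpy is installed, and
# uses deep copies because strassen() pads its inputs in place):
#
#     import copy
#     import numpy as np
#     expected = np.array(matrixa) @ np.array(matrixb)
#     got = strassen(copy.deepcopy(matrixa), copy.deepcopy(matrixb))
#     assert np.allclose(got, expected)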
| 185 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""microsoft/speecht5_asr""": 1024,
"""microsoft/speecht5_tts""": 1024,
"""microsoft/speecht5_vc""": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
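# --- Usage sketch (added): requires the released checkpoint to be downloadable.
#
#     from transformers import SpeechT5Tokenizer
#     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#     ids = tokenizer("Hello world!").input_ids  # char-level pieces, ends with </s>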
| 185 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 364 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """Treap's node: a value plus a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
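# --- Usage sketch (added): driving the treap programmatically instead of the REPL.
#
#     root = None
#     for v in (5, 3, 9, 3):
#         root = insert(root, v)
#     root = erase(root, 3)  # removes every node holding 3
#     inorder(root)          # prints: 5,9,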
| 40 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
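# --- Launch sketch (added): a typical multi-GPU invocation (the script filename
# is an assumption):
#
#     accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2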
| 27 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 27 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
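# --- Usage sketch (added): configs are plain constructors, so a variant model is
# just (assumes a transformers version that ships EfficientFormer):
#
#     from transformers import EfficientFormerConfig, EfficientFormerModel
#     config = EfficientFormerConfig(depths=[2, 2, 4, 3])
#     model = EfficientFormerModel(config)  # randomly initialized weights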
| 279 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn bunch into (features, targets)."""
    return (data["data"], data["target"])


def xgboost(features, target) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features/targets."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train on the Iris dataset and show a normalized confusion matrix."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 279 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = R'\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : List[Any] , lowerCAmelCase_ : torch.LongTensor , lowerCAmelCase_ : torch.FloatTensor , **lowerCAmelCase_ : List[str] ) -> bool:
raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] = None ) -> Tuple:
__lowerCAmelCase = max_length
__lowerCAmelCase = max_position_embeddings
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : Optional[Any] , lowerCAmelCase_ : torch.LongTensor , lowerCAmelCase_ : torch.FloatTensor , **lowerCAmelCase_ : Union[str, Any] ) -> bool:
__lowerCAmelCase = input_ids.shape[-1]
__lowerCAmelCase = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
'exceptions, performance degradation, or nothing at all.' )
return is_done
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> str:
warnings.warn(
'The class `MaxNewTokensCriteria` is deprecated. '
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
'with `max_length = start_length + max_new_tokens` instead.' , lowerCAmelCase_ , )
__lowerCAmelCase = start_length
__lowerCAmelCase = max_new_tokens
__lowerCAmelCase = start_length + max_new_tokens
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : Tuple , lowerCAmelCase_ : torch.LongTensor , lowerCAmelCase_ : torch.FloatTensor , **lowerCAmelCase_ : Optional[Any] ) -> bool:
return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria ( StoppingCriteria ):
"""simple docstring"""
    def __init__( self , max_time : float , initial_timestamp : Optional[float] = None ) -> None:
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowerCAmelCase_ )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList ( list ):
"""simple docstring"""
@add_start_docstrings(lowerCAmelCase_ )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        return any(criteria(input_ids , scores ) for criteria in self )
@property
    def max_length ( self ) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
return None
def a_ ( stopping_criteria : StoppingCriteriaList , max_length : int ):
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
return new_stopping_criteria
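# Hedged usage sketch for the criteria above; the checkpoint name and generation
# setup are illustrative assumptions, not part of this module:
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tokenizer("Hello", return_tensors="pt")
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     outputs = model.generate(**inputs, stopping_criteria=criteria)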
| 284 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = """beit"""
    def __init__( self , vocab_size=8_1_9_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 1_1] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_5_5 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig ( OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation ( self ) -> float:
return 1e-4
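# Hedged usage sketch for the two classes above (argument values are illustrative):
#
#     config = BeitConfig(image_size=384, use_relative_position_bias=True)
#     onnx_config = BeitOnnxConfig(config)
#     print(onnx_config.inputs)               # the pixel_values spec defined above
#     print(onnx_config.atol_for_validation)  # 1e-4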
| 284 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class MarianTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp ( self ) -> None:
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer ( self , **kwargs ) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ) -> Optional[int]:
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id ( self ) -> None:
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<pad>''' )
        self.assertEqual(len(vocab_keys ) , 9 )
    def test_vocab_size ( self ) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def test_tokenizer_equivalence_en_de ( self ) -> None:
        en_de_tokenizer = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
        batch = en_de_tokenizer(['''I am a small frog'''] , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob('''*''' )]
        self.assertIn('''source.spm''' , contents )
        MarianTokenizer.from_pretrained(save_dir )
    def test_outputs_not_longer_than_maxlen ( self ) -> None:
        tok = self.get_tokenizer()
        batch = tok(
            ['''I am a small frog''' * 1_0_0_0, '''I am a small frog'''] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
    def test_outputs_can_be_shorter ( self ) -> None:
        tok = self.get_tokenizer()
        batch_smaller = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
    def test_tokenizer_integration ( self ) -> None:
# fmt: off
_a : List[str] = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
    def test_tokenizer_integration_seperate_vocabs ( self ) -> None:
        tokenizer = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
        source_text = '''Tämä on testi'''
        target_text = '''This is a test'''
        expected_src_tokens = [7_6, 7, 2_0_4_7, 2]
        expected_target_tokens = [6_9, 1_2, 1_1, 9_4_0, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(src_ids , expected_src_tokens )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(target_ids , expected_target_tokens )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
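# Hedged usage sketch mirroring the en-de integration test above (requires network
# access to download the checkpoint):
#
#     from transformers import MarianTokenizer
#
#     tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     batch = tok(["I am a small frog"], return_tensors="pt")
#     # per the test above, batch.input_ids[0] is [38, 121, 14, 697, 38848, 0]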
| 364 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
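# Sketch of what the lazy pattern buys at import time (not part of this module):
# importing the package only builds `_import_structure`, a dict of module -> names;
# the heavy model file is imported on first attribute access, e.g.
#
#     import transformers
#     transformers.XmodModel  # triggers the real import of modeling_xmod here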
| 15 | 0 |
from __future__ import annotations
def fractional_knapsack ( value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
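# Usage sketch for fractional_knapsack above (values are illustrative):
#
#     value = [1, 3, 5, 7, 9]
#     weight = [0.9, 0.7, 0.5, 0.3, 0.1]
#     max_value, fractions = fractional_knapsack(value, weight, capacity=25)
#     assert max_value == 25 and fractions == [1, 1, 1, 1, 1]  # everything fits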
| 230 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order ( df , partition_order ) -> Tuple:
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Tuple:
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order ) # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Any:
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_b = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_b.n_shards == 2
    expected_row_ids_and_row_dicts_b = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_b ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_b[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
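# The tests above pin rows to partitions via Spark's SPARK_PARTITION_ID(); a minimal
# standalone sketch of the same trick (session setup assumed):
#
#     spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
#     df = spark.range(10).repartition(2)
#     rows_in_partition_0 = df.where("SPARK_PARTITION_ID() = 0").collect()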
| 230 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor ( PoolFormerImageProcessor ):
    def __init__(self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
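# The shim above only warns and forwards; new code should use the image processor
# directly, e.g. (a hedged sketch):
#
#     from transformers import PoolFormerImageProcessor
#
#     image_processor = PoolFormerImageProcessor(do_resize=True)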
| 370 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs (self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common (self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip (self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp (self ):
        """simple docstring"""
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=3_7 )
    def test_config (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_causal_lm (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained (self ):
        """simple docstring"""
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_masked_lm (self ):
        """simple docstring"""
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 5_0_0_0_0
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
    tolerance = 1E-4
    def test_basic (self ):
        """simple docstring"""
        input_ids = tf.constant([[4, 1_0]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb1 = emba(input_ids.shape )
        desired_weights = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb1 , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer (self ):
        """simple docstring"""
        desired_weights = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
        emba([2, 1_6, 5_1_2] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
    tolerance = 1E-4
    def test_apply_rotary_position_embeddings (self ):
        """simple docstring"""
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_layer , atol=self.tolerance )
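# A simplified NumPy sketch of the rotary position embedding the last test checks.
# This illustrates the general rotate-half formulation, not the exact
# TFRoFormerSelfAttention implementation:
#
#     import numpy as np
#
#     def rotate_half(x):
#         x1, x2 = x[..., 0::2], x[..., 1::2]
#         return np.stack([-x2, x1], axis=-1).reshape(x.shape)
#
#     def apply_rotary(x, sin, cos):
#         # sin/cos are broadcast to x's shape, one angle per feature pair
#         return x * cos + rotate_half(x) * sin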
| 143 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig ( datasets.BuilderConfig ):
    '''simple docstring'''
    features: Optional[datasets.Features] = None
class Pandas ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info (self ):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators (self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table (self , pa_table ) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables (self , files ):
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table )
| 341 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer ( tf.keras.layers.Layer ):
"""simple docstring"""
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ) -> None:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
@classmethod
    def from_tokenizer ( cls , tokenizer , *args , **kwargs ):
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
@classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
@classmethod
    def from_config ( cls , config ):
        return cls(**config )
    def get_config ( self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
    def call ( self , x , max_length = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian (string_aa : bytes ) -> bytes:
    """simple docstring"""
    if len(string_aa ) != 32:
        raise ValueError("""Input must be of length 32""" )
    little_endian = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def reformat_hex (i : int ) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError("""Input must be non-negative""" )
    hex_rep = format(i , """08x""" )[-8:]
    little_endian_hex = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def preprocess (message : bytes ) -> bytes:
    """simple docstring"""
    bit_string = B""""""
    for char in message:
        bit_string += format(char , """08b""" ).encode("""utf-8""" )
    start_len = format(len(bit_string ) , """064b""" ).encode("""utf-8""" )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_12 != 4_48:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def get_block_words (bit_string : bytes ):
    """simple docstring"""
    if len(bit_string ) % 5_12 != 0:
        raise ValueError("""Input must have length that's a multiple of 512""" )
    for pos in range(0 , len(bit_string ) , 5_12 ):
        block = bit_string[pos : pos + 5_12]
        block_words = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def not_aa (i : int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError("""Input must be non-negative""" )
    i_str = format(i , """032b""" )
    new_str = """"""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa (a : int , b : int ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def left_rotate_aa (i : int , shift : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me (message : bytes ) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x6745_2301
    ba = 0xEFCD_AB89
    ca = 0x98BA_DCFE
    da = 0x1032_5476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
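# Sanity-check sketch: the digest above can be compared against hashlib for any
# input (the MD5 of the empty string is a well-known constant):
#
#     import hashlib
#     assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")
#     # b"d41d8cd98f00b204e9800998ecf8427e"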
| 132 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING
_SCREAMING_SNAKE_CASE : List[Any] = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_SCREAMING_SNAKE_CASE : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class FlaxAutoModelForCausalLM ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_SCREAMING_SNAKE_CASE : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class FlaxAutoModelForMaskedLM ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_SCREAMING_SNAKE_CASE : Any = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class FlaxAutoModelForSeqaSeqLM ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_SCREAMING_SNAKE_CASE : Optional[Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class FlaxAutoModelForSequenceClassification ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_SCREAMING_SNAKE_CASE : Any = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class FlaxAutoModelForQuestionAnswering ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE : Tuple = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class FlaxAutoModelForTokenClassification ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class FlaxAutoModelForMultipleChoice ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_SCREAMING_SNAKE_CASE : Dict = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class FlaxAutoModelForNextSentencePrediction ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_SCREAMING_SNAKE_CASE : Any = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class FlaxAutoModelForImageClassification ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class FlaxAutoModelForVisionaSeq ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_SCREAMING_SNAKE_CASE : List[Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class FlaxAutoModelForSpeechSeqaSeq ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_SCREAMING_SNAKE_CASE : List[Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 183 |
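The block above registers task-specific OrderedDicts of (model type → class name) and resolves them lazily through `_LazyAutoMapping` and `auto_class_update`. A minimal sketch of that lazy-registry idea, with hypothetical module paths and class names (not the transformers internals):

```python
from collections import OrderedDict
import importlib

# Hypothetical registry: model type -> (module path, class name).
_REGISTRY = OrderedDict(
    [
        ("bert", ("my_models.bert", "BertForTokenClassification")),
        ("roberta", ("my_models.roberta", "RobertaForTokenClassification")),
    ]
)

class LazyMapping:
    """Resolve class objects only on first access, like _LazyAutoMapping."""

    def __init__(self, registry):
        self._registry = registry
        self._cache = {}

    def __getitem__(self, model_type):
        if model_type not in self._cache:
            module_path, class_name = self._registry[model_type]
            # The import is deferred until a model type is actually requested.
            module = importlib.import_module(module_path)
            self._cache[model_type] = getattr(module, class_name)
        return self._cache[model_type]
```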
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def UpperCamelCase ( self : int ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase ( self : List[str] ) -> int:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def UpperCamelCase ( self : Optional[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'apply_ocr' ) )
def UpperCamelCase ( self : Any ) -> Any:
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def UpperCamelCase ( self : Dict ) -> Any:
pass
def UpperCamelCase ( self : int ) -> Dict:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCamelCase ( self : Dict ) -> int:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCamelCase ( self : Dict ) -> Any:
# with apply_OCR = True
lowerCamelCase_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowerCamelCase_ = Image.open(ds[0]['file'] ).convert('RGB' )
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowerCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
# with apply_OCR = False
lowerCamelCase_ = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 183 | 1 |
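A self-contained sketch of the shape checks these tests perform; `make_random_images` and `to_pixel_values` are illustrative stand-ins for `prepare_image_inputs` and the image processor, assuming only numpy and PIL:

```python
import numpy as np
from PIL import Image

def make_random_images(batch_size=7, num_channels=3, height=30, width=30):
    # Random uint8 RGB images, analogous to prepare_image_inputs.
    return [
        Image.fromarray(np.random.randint(0, 256, (height, width, num_channels), dtype=np.uint8))
        for _ in range(batch_size)
    ]

def to_pixel_values(images, size=18):
    # Resize and stack into a (batch, channels, height, width) float array.
    arrays = [np.asarray(img.resize((size, size)), dtype=np.float32) / 255.0 for img in images]
    return np.stack(arrays).transpose(0, 3, 1, 2)

pixel_values = to_pixel_values(make_random_images())
assert pixel_values.shape == (7, 3, 18, 18)
```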
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = """base_with_context"""
def load_notes_encoder( weights , model ):
__snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
__snake_case : Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'layers_{lyr_num}']
__snake_case : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight["attention"]
__snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
__snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'layers_{lyr_num}']
        attention_weights = ly_weight["attention"]
__snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
__snake_case : Any = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
__snake_case : Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
__snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F'layers_{lyr_num}']
__snake_case : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
__snake_case : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight["self_attention"]
__snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
__snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__snake_case : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__snake_case : int = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
| 366 |
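The loaders above repeatedly wrap transposed Flax kernels in `nn.Parameter`. A small, verifiable sketch of that core move (Flax `Dense` stores kernels as (in, out), while `torch.nn.Linear` stores (out, in)); the helper name is illustrative:

```python
import numpy as np
import torch
import torch.nn as nn

def port_dense_kernel(linear: nn.Linear, flax_kernel: np.ndarray) -> None:
    # Flax: y = x @ kernel, kernel shape (in_features, out_features).
    # PyTorch: y = x @ weight.T, weight shape (out_features, in_features).
    assert flax_kernel.shape == (linear.in_features, linear.out_features)
    linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))

layer = nn.Linear(4, 8, bias=False)
kernel = np.random.randn(4, 8).astype(np.float32)
port_dense_kernel(layer, kernel)

x = torch.randn(2, 4)
expected = torch.from_numpy(x.numpy() @ kernel)  # the Flax-side computation
assert torch.allclose(layer(x), expected, atol=1e-5)
```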
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor (SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ["input_features", "is_longer"]
def __init__( self : Optional[int] , lowerCamelCase : Any=64 , lowerCamelCase : Dict=48000 , lowerCamelCase : Dict=480 , lowerCamelCase : Tuple=10 , lowerCamelCase : Optional[int]=1024 , lowerCamelCase : int=0.0 , lowerCamelCase : Any=False , lowerCamelCase : float = 0 , lowerCamelCase : float = 14000 , lowerCamelCase : int = None , lowerCamelCase : str = "fusion" , lowerCamelCase : str = "repeatpad" , **lowerCamelCase : Optional[int] , ) -> Dict:
super().__init__(
feature_size=lowerCamelCase , sampling_rate=lowerCamelCase , padding_value=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
__snake_case : Optional[Any] = top_db
__snake_case : Dict = truncation
__snake_case : Dict = padding
__snake_case : Optional[Any] = fft_window_size
__snake_case : Optional[Any] = (fft_window_size >> 1) + 1
__snake_case : Dict = hop_length
__snake_case : Optional[int] = max_length_s
__snake_case : Optional[int] = max_length_s * sampling_rate
__snake_case : Dict = sampling_rate
__snake_case : Optional[int] = frequency_min
__snake_case : Any = frequency_max
__snake_case : Optional[int] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm=lowerCamelCase , mel_scale="htk" , )
__snake_case : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case ( self : str ) -> Dict[str, Any]:
__snake_case : List[str] = copy.deepcopy(self.__dict__ )
__snake_case : List[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __snake_case ( self : List[Any] , lowerCamelCase : np.array , lowerCamelCase : Optional[np.array] = None ) -> np.ndarray:
__snake_case : List[Any] = spectrogram(
lowerCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any ) -> str:
__snake_case : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__snake_case : Tuple = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__snake_case : Tuple = [0]
# randomly choose index for each part
__snake_case : List[Any] = np.random.choice(ranges[0] )
__snake_case : int = np.random.choice(ranges[1] )
__snake_case : List[str] = np.random.choice(ranges[2] )
__snake_case : Dict = mel[idx_front : idx_front + chunk_frames, :]
__snake_case : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
__snake_case : Tuple = mel[idx_back : idx_back + chunk_frames, :]
__snake_case : Optional[Any] = torch.tensor(mel[None, None, :] )
__snake_case : Optional[int] = torch.nn.functional.interpolate(
lowerCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=lowerCamelCase )
__snake_case : List[Any] = mel_shrink[0][0].numpy()
__snake_case : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __snake_case ( self : Any , lowerCamelCase : np.array , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Dict ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__snake_case : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__snake_case : Tuple = len(lowerCamelCase ) - max_length
__snake_case : List[Any] = np.random.randint(0 , overflow + 1 )
__snake_case : Dict = waveform[idx : idx + max_length]
__snake_case : Optional[int] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__snake_case : Any = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
__snake_case : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__snake_case : str = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__snake_case : str = np.stack([mel, mel, mel, mel] , axis=0 )
__snake_case : Optional[Any] = False
else:
__snake_case : Any = self._random_mel_fusion(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__snake_case : Tuple = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
__snake_case : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__snake_case : List[str] = int(max_length / len(lowerCamelCase ) )
__snake_case : Any = np.stack(np.tile(lowerCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__snake_case : str = int(max_length / len(lowerCamelCase ) )
__snake_case : List[str] = np.stack(np.tile(lowerCamelCase , lowerCamelCase ) )
__snake_case : str = np.pad(lowerCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
__snake_case : List[str] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
__snake_case : List[str] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__snake_case : Optional[int] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[str] , lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase : str = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , **lowerCamelCase : Any , ) -> BatchFeature:
__snake_case : Union[str, Any] = truncation if truncation is not None else self.truncation
__snake_case : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__snake_case : Any = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
__snake_case : str = is_batched_numpy or (
isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case : Tuple = [np.asarray(lowerCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ):
__snake_case : str = np.asarray(lowerCamelCase , dtype=np.floataa )
elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__snake_case : int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case : Union[str, Any] = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
__snake_case : Optional[int] = [
self._get_input_mel(lowerCamelCase , max_length if max_length else self.nb_max_samples , lowerCamelCase , lowerCamelCase )
for waveform in raw_speech
]
__snake_case : Optional[int] = []
__snake_case : Any = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__snake_case : Optional[Any] = np.random.randint(0 , len(lowerCamelCase ) )
__snake_case : Union[str, Any] = True
if isinstance(input_mel[0] , lowerCamelCase ):
__snake_case : List[str] = [np.asarray(lowerCamelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__snake_case : Any = [[longer] for longer in is_longer]
__snake_case : Tuple = {"input_features": input_mel, "is_longer": is_longer}
__snake_case : List[str] = BatchFeature(lowerCamelCase )
if return_tensors is not None:
__snake_case : Any = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 134 | 0 |
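A numpy-only sketch of the `repeatpad` branch above: tile the short clip as many whole times as fit, then zero-pad up to `max_length` (the helper name is illustrative):

```python
import numpy as np

def repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    # Repeat the clip as many whole times as fit, then pad the rest with zeros.
    n_repeat = max_length // len(waveform)
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant", constant_values=0)

wav = np.ones(3, dtype=np.float32)
padded = repeatpad(wav, 10)
assert padded.shape == (10,)
assert padded.tolist() == [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]
```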
'''simple docstring'''
import argparse
import os
import re
__lowercase : Optional[int] = 'src/diffusers'
# Pattern that looks at the indentation in a line.
__lowercase : int = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
__lowercase : str = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowercase : Optional[Any] = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
__lowercase : List[Any] = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowercase : Optional[int] = re.compile(R'\[([^\]]+)\]')
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : Optional[int] = _re_indent.search(_SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]="" , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Dict=None ):
__a : Optional[Any] = 0
__a : str = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_SCREAMING_SNAKE_CASE ):
index += 1
__a : List[Any] = ['\n'.join(lines[:index] )]
else:
__a : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__a : Dict = [lines[index]]
index += 1
while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) )
if index < len(_SCREAMING_SNAKE_CASE ) - 1:
__a : Any = [lines[index + 1]]
index += 1
else:
__a : Tuple = []
else:
blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) )
__a : Union[str, Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_SCREAMING_SNAKE_CASE ) > 0:
blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
def _inner(_SCREAMING_SNAKE_CASE : Optional[int] ):
return key(_SCREAMING_SNAKE_CASE ).lower().replace('_' , '' )
return _inner
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
# If no key is provided, we use a noop.
def noop(_SCREAMING_SNAKE_CASE : int ):
return x
if key is None:
__a : Union[str, Any] = noop
# Constants are all uppercase, they go first.
__a : Any = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__a : List[str] = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE )[0].isupper() and not key(_SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
__a : Optional[Any] = [obj for obj in objects if not key(_SCREAMING_SNAKE_CASE )[0].isupper()]
__a : Any = ignore_underscore(_SCREAMING_SNAKE_CASE )
return sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
# This inner function sort imports between [ ].
def _replace(_SCREAMING_SNAKE_CASE : List[str] ):
__a : Optional[int] = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
__a : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a : List[Any] = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]"
__a : List[Any] = import_statement.split('\n' )
if len(_SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__a : List[str] = 2 if lines[1].strip() == '[' else 1
__a : int = [(i, _re_strip_line.search(_SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__a : str = sort_objects(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )
__a : str = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__a : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__a : str = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a : List[Any] = keys[:-1]
__a : str = get_indent(lines[1] ) + ', '.join([F"""\"{k}\"""" for k in sort_objects(_SCREAMING_SNAKE_CASE )] )
return "\n".join(_SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
__a : Tuple = _re_bracket_content.sub(_replace , _SCREAMING_SNAKE_CASE )
return import_statement
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any]=True ):
with open(_SCREAMING_SNAKE_CASE , 'r' ) as f:
__a : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__a : Optional[int] = split_code_in_indented_blocks(
_SCREAMING_SNAKE_CASE , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__a : List[Any] = main_blocks[block_idx]
__a : Any = block.split('\n' )
# Get to the start of the imports.
__a : Dict = 0
while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__a : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(_SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
__a : Any = '\n'.join(block_lines[line_idx:-1] )
__a : Optional[Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__a : int = split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE , indent_level=_SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
__a : Optional[Any] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__a : Union[str, Any] = [(pattern.search(_SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(_SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__a : Optional[Any] = [(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None]
__a : Tuple = [x[0] for x in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__a : List[str] = 0
__a : Any = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__a : List[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
__a : List[Any] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any]=True ):
__a : Optional[int] = []
for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
__a : int = sort_imports(os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' ) , check_only=_SCREAMING_SNAKE_CASE )
if result:
__a : List[Any] = [os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' )]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Would overwrite {len(_SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowercase : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 27 |
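The ordering rule the sorter applies (constants first, then classes, then functions, each compared case- and underscore-insensitively) fits in a few lines; this standalone sketch reproduces it:

```python
def sort_objects(objects):
    def norm(name):
        # Underscore- and case-insensitive sort key, as in ignore_underscore above.
        return name.lower().replace("_", "")

    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=norm) + sorted(classes, key=norm) + sorted(functions, key=norm)

print(sort_objects(["my_func", "MY_CONST", "MyClass", "a_func", "AClass"]))
# ['MY_CONST', 'AClass', 'MyClass', 'a_func', 'my_func']
```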
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats( url: str = "https://www.worldometers.info/coronavirus" ) -> dict:
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
        print(f'''{key}\n{value}\n''')
| 27 | 1 |
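The same key/value pairing can be exercised offline against a static HTML snippet (the markup and numbers below are made up for illustration):

```python
from bs4 import BeautifulSoup

html = """
<h1>Coronavirus Cases:</h1><div class="maincounter-number">700,000,000</div>
<h1>Deaths:</h1><div class="maincounter-number">7,000,000</div>
"""
soup = BeautifulSoup(html, "html.parser")
keys = soup.find_all("h1")
values = soup.find_all("div", {"class": "maincounter-number"})
stats = {k.text.strip().rstrip(":"): v.text.strip() for k, v in zip(keys, values)}
print(stats)  # {'Coronavirus Cases': '700,000,000', 'Deaths': '7,000,000'}
```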
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "torchsde"]
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple:
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Union[str, Any]:
requires_backends(cls , ["""torch""", """torchsde"""] )
| 81 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body( *snake_case__ , **snake_case__ ):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory( args: Namespace ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers)
class ServeModelInfoResult ( BaseModel ):
    """simple docstring"""
    infos: dict
class ServeTokenizeResult ( BaseModel ):
    """simple docstring"""
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult ( BaseModel ):
    """simple docstring"""
    text: str
class ServeForwardResult ( BaseModel ):
    """simple docstring"""
    output: Any
class ServeCommand ( BaseTransformersCLICommand ):
    """simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Any:
_A = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=lowerCAmelCase_ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=lowerCAmelCase_ , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=lowerCAmelCase_ , default=88_88 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=lowerCAmelCase_ , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=lowerCAmelCase_ , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=lowerCAmelCase_ , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=lowerCAmelCase_ , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=lowerCAmelCase_ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self , pipeline: Pipeline , host: str , port: int , workers: int ) -> None:
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(F'''Serving model over {host}:{port}''' )
            self._app = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ),
] , timeout=6_00 , )
def UpperCAmelCase ( self ) -> str:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def UpperCAmelCase ( self ) -> Union[str, Any]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def UpperCAmelCase ( self , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> List[Any]:
try:
_A = self._pipeline.tokenizer.tokenize(lowerCAmelCase_ )
if return_ids:
_A = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
return ServeTokenizeResult(tokens=lowerCAmelCase_ , tokens_ids=lowerCAmelCase_ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(lowerCAmelCase_ )} )
def UpperCAmelCase ( self , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , ) -> Dict:
try:
_A = self._pipeline.tokenizer.decode(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return ServeDeTokenizeResult(model="""""" , text=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(lowerCAmelCase_ )} )
async def UpperCAmelCase ( self , lowerCAmelCase_=Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> Any:
# Check we don't have empty string
if len(lowerCAmelCase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_A = self._pipeline(lowerCAmelCase_ )
return ServeForwardResult(output=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(5_00 , {"""error""": str(lowerCAmelCase_ )} )
| 81 | 1 |
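A minimal FastAPI sketch of the `/tokenize` route pattern used above, with a whitespace splitter standing in for the real pipeline; the module and route names are illustrative:

```python
from fastapi import FastAPI
from pydantic import BaseModel

class TokenizeRequest(BaseModel):
    text_input: str

app = FastAPI()

@app.post("/tokenize")
def tokenize(request: TokenizeRequest):
    # Placeholder tokenizer; the real command delegates to a transformers pipeline.
    return {"tokens": request.text_input.split()}

# Launch with: uvicorn my_server:app --host localhost --port 8888
```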
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
__lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
        json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile)
| 269 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump test results into a json file to share in the PR
        with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile) | 269 | 1 |
def bin_to_octal( bin_string: str ) -> str:
    if not all(char in '01' for char in bin_string ):
        raise ValueError('Non-binary value was passed to the function' )
    if not bin_string:
        raise ValueError('Empty string was passed to the function' )
    oct_string = ''
    # left-pad with zeros until the length is a multiple of 3, since each
    # octal digit encodes exactly three bits
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
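# illustrative check (assumed behavior of the function above): "1010" pads to
# "001010"; the 3-bit groups 001 and 010 give octal digits 1 and 2, so
# bin_to_octal("1010") returns '12', matching oct(0b1010).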
if __name__ == "__main__":
from doctest import testmod
testmod()
| 366 |
from __future__ import annotations
def all_construct(target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
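# illustrative (hand-checked) example: all_construct("ab", ["a", "b", "ab"]) returns
# [["ab"], ["a", "b"]], i.e. every ordered way to assemble the target from the bank.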
| 84 | 0 |
def method_1(boundary , steps ):
    """simple docstring"""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
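# For reference (a standard identity, not from the original source): the loop above
# realizes the composite trapezoidal rule
#     integral_a^b f(x) dx  ~=  h * (f(a)/2 + f(a+h) + ... + f(b-h) + f(b)/2),
# with h = (b - a) / steps.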
def make_points(a , b , h ):
    """simple docstring"""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x ):  # enter your function here
    """simple docstring"""
    y = (x - 0) * (x - 0)
    return y
def main():
    """simple docstring"""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary , steps )
    print(F"y = {y}" )
if __name__ == "__main__":
    main()
| 210 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester( ConfigTester ):
"""simple docstring"""
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class LevitModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
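        # four stride-2 convolutions shrink a 64x64 input to roughly 4x4; the two
        # ''Subsample'' stages in down_ops reduce it further, which is why the
        # sequence length asserted below is ceil(height / 4) * ceil(width / 4)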
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = LevitModelTester(self )
        self.config_tester = LevitConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
    def test_attention_outputs( self ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
for _ in range(4 ):
                height = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
                width = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
'''simple docstring'''
pass
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
    def test_problem_types( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
                    config.problem_type = problem_type['''title''']
                    config.num_labels = problem_type['''num_labels''']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['''labels'''] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
                    inputs['''labels'''] = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class LevitModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) ) | 210 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box( box , width , height ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
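# illustrative arithmetic: a 50x20 pixel box at (10, 10) in a 200x100 image maps to
# normalize_box([10, 10, 60, 30], 200, 100) == [50, 100, 300, 300] in the 0-1000
# coordinate space that LayoutLM-style models expect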
def apply_tesseract(image , ocr_lang , tesseract_config = None ):
    tesseract_config = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image ,lang=ocr_lang ,output_type='dict' ,config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left ,top ,width ,height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box ,image_width ,image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class LayoutLMv2ImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        output_size = (size['height'], size['width'])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
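    # note: the bare `resize` in the return above resolves to the module-level
    # image_transforms.resize imported at the top, not to this method; a method's
    # own name is not in scope inside its body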
    def preprocess( self , images , do_resize = None , size = None , resample = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
return data
| 245 |
def is_power_of_two( number ) -> bool:
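    # bit trick: a power of two has exactly one set bit, so number & (number - 1)
    # clears it (8 & 7 == 0b1000 & 0b0111 == 0); note 0 & -1 == 0 as well, so this
    # implementation also reports True for 0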
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 1 |