code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
_lowerCAmelCase = AudioLDMPipeline
_lowerCAmelCase = TEXT_TO_AUDIO_PARAMS
_lowerCAmelCase = TEXT_TO_AUDIO_BATCH_PARAMS
_lowerCAmelCase = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def lowerCAmelCase__(self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_lowercase , )
__a : Optional[int] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__a : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__a : str = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
__a : Any = ClapTextModelWithProjection(_lowercase )
__a : Optional[int] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
__a : Tuple = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_lowercase , )
__a : int = SpeechTaHifiGan(_lowercase )
__a : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
__a : Any = torch.manual_seed(_lowercase )
else:
__a : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__a : List[Any] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__a : int = self.get_dummy_components()
__a : str = AudioLDMPipeline(**_lowercase )
__a : Union[str, Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : Union[str, Any] = self.get_dummy_inputs(_lowercase )
__a : Optional[int] = audioldm_pipe(**_lowercase )
__a : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 256
__a : Union[str, Any] = audio[:10]
__a : Dict = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = self.get_dummy_components()
__a : List[str] = AudioLDMPipeline(**_lowercase )
__a : Dict = audioldm_pipe.to(_lowercase )
__a : List[str] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : List[str] = self.get_dummy_inputs(_lowercase )
__a : Any = 3 * [inputs["""prompt"""]]
# forward
__a : List[str] = audioldm_pipe(**_lowercase )
__a : Tuple = output.audios[0]
__a : int = self.get_dummy_inputs(_lowercase )
__a : Optional[Any] = 3 * [inputs.pop("""prompt""" )]
__a : str = audioldm_pipe.tokenizer(
_lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
__a : Tuple = text_inputs["""input_ids"""].to(_lowercase )
__a : str = audioldm_pipe.text_encoder(
_lowercase , )
__a : Optional[int] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__a : List[str] = F.normalize(_lowercase , dim=-1 )
__a : List[Any] = prompt_embeds
# forward
__a : Dict = audioldm_pipe(**_lowercase )
__a : Dict = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = self.get_dummy_components()
__a : Any = AudioLDMPipeline(**_lowercase )
__a : Optional[Any] = audioldm_pipe.to(_lowercase )
__a : Dict = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : List[Any] = self.get_dummy_inputs(_lowercase )
__a : Optional[int] = 3 * ["""this is a negative prompt"""]
__a : Union[str, Any] = negative_prompt
__a : Dict = 3 * [inputs["""prompt"""]]
# forward
__a : Any = audioldm_pipe(**_lowercase )
__a : Union[str, Any] = output.audios[0]
__a : Dict = self.get_dummy_inputs(_lowercase )
__a : Optional[Any] = 3 * [inputs.pop("""prompt""" )]
__a : Union[str, Any] = []
for p in [prompt, negative_prompt]:
__a : str = audioldm_pipe.tokenizer(
_lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
__a : Optional[int] = text_inputs["""input_ids"""].to(_lowercase )
__a : Optional[int] = audioldm_pipe.text_encoder(
_lowercase , )
__a : Optional[int] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__a : Tuple = F.normalize(_lowercase , dim=-1 )
embeds.append(_lowercase )
__a , __a : int = embeds
# forward
__a : List[Any] = audioldm_pipe(**_lowercase )
__a : int = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__a : int = self.get_dummy_components()
__a : Tuple = PNDMScheduler(skip_prk_steps=_lowercase )
__a : Optional[int] = AudioLDMPipeline(**_lowercase )
__a : Optional[int] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : Optional[int] = self.get_dummy_inputs(_lowercase )
__a : Any = """egg cracking"""
__a : Optional[Any] = audioldm_pipe(**_lowercase , negative_prompt=_lowercase )
__a : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 256
__a : List[Any] = audio[:10]
__a : Optional[Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__a : str = self.get_dummy_components()
__a : Optional[int] = PNDMScheduler(skip_prk_steps=_lowercase )
__a : Any = AudioLDMPipeline(**_lowercase )
__a : Optional[int] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
__a : Optional[int] = audioldm_pipe(_lowercase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__a : List[str] = 2
__a : int = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__a : str = 2
__a : Tuple = audioldm_pipe(_lowercase , num_inference_steps=2 , num_waveforms_per_prompt=_lowercase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__a : List[Any] = 2
__a : List[Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_lowercase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__a : str = self.get_dummy_components()
__a : Tuple = AudioLDMPipeline(**_lowercase )
__a : int = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : Union[str, Any] = audioldm_pipe.vocoder.config.sampling_rate
__a : List[str] = self.get_dummy_inputs(_lowercase )
__a : str = audioldm_pipe(audio_length_in_s=0.016 , **_lowercase )
__a : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) / vocoder_sampling_rate == 0.016
__a : List[str] = audioldm_pipe(audio_length_in_s=0.032 , **_lowercase )
__a : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) / vocoder_sampling_rate == 0.032
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = self.get_dummy_components()
__a : Dict = AudioLDMPipeline(**_lowercase )
__a : List[str] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : Optional[int] = ["""hey"""]
__a : List[Any] = audioldm_pipe(_lowercase , num_inference_steps=1 )
__a : List[Any] = output.audios.shape
assert audio_shape == (1, 256)
__a : Union[str, Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__a : List[Any] = SpeechTaHifiGan(_lowercase ).to(_lowercase )
__a : Dict = audioldm_pipe(_lowercase , num_inference_steps=1 )
__a : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowerCAmelCase__(self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=_lowercase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__(self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__(self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ):
'''simple docstring'''
__a : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__a : int = np.random.RandomState(_lowercase ).standard_normal((1, 8, 128, 16) )
__a : str = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
__a : Dict = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__a : Dict = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : str = self.get_inputs(_lowercase )
__a : Dict = 25
__a : Optional[int] = audioldm_pipe(**_lowercase ).audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 81920
__a : List[Any] = audio[77230:77240]
__a : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
__a : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__a : int = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__a : Union[str, Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__a : Optional[int] = self.get_inputs(_lowercase )
__a : Union[str, Any] = audioldm_pipe(**_lowercase ).audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 81920
__a : int = audio[27780:27790]
__a : List[Any] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
__a : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowercase__ = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 63 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "linear"
_lowerCAmelCase = "cosine"
_lowerCAmelCase = "cosine_with_restarts"
_lowerCAmelCase = "polynomial"
_lowerCAmelCase = "constant"
_lowerCAmelCase = "constant_with_warmup"
_lowerCAmelCase = "piecewise_constant"
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ):
__a : Optional[int] = {}
__a : Any = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__a , __a : int = rule_str.split(""":""" )
__a : Optional[int] = int(_lowerCamelCase )
__a : str = float(_lowerCamelCase )
__a : int = value
__a : Dict = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : str , _lowerCamelCase : Tuple ):
def rule_func(_lowerCamelCase : int ) -> float:
__a : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__a : Optional[int] = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str=-1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Any ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=1E-7 , _lowerCamelCase : Optional[int]=1.0 , _lowerCamelCase : Optional[int]=-1 ):
__a : Union[str, Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__a : Tuple = lr_init - lr_end
__a : int = num_training_steps - num_warmup_steps
__a : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
__a : List[str] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ):
__a : int = SchedulerType(_lowerCamelCase )
__a : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
| 63 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : Dict = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 1_8, 2]
__a : str = True if """large""" in model_name or """huge""" in model_name else False
__a : List[str] = True if """large""" in model_name or """huge""" in model_name else False
__a : List[Any] = True if """large""" in model_name or """huge""" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__a : Optional[Any] = [3, 3, 3, 3]
__a : Optional[int] = [5, 5, 5, 5]
elif "fl4" in model_name:
__a : Union[str, Any] = [4, 4, 4, 4]
__a : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__a : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
__a : int = [3, 3, 3, 3]
else:
__a : Union[str, Any] = [2, 2, 2, 2]
if "tiny" in model_name:
__a : List[str] = 9_6
elif "small" in model_name:
__a : Any = 9_6
elif "base" in model_name:
__a : str = 1_2_8
elif "large" in model_name:
__a : Any = 1_9_2
elif "xlarge" in model_name:
__a : int = 2_5_6
elif "huge" in model_name:
__a : Tuple = 3_5_2
# set label information
__a : List[str] = """huggingface/label-files"""
if "large" in model_name or "huge" in model_name:
__a : Union[str, Any] = """imagenet-22k-id2label.json"""
else:
__a : Union[str, Any] = """imagenet-1k-id2label.json"""
__a : Any = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__a : Optional[int] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__a : str = {v: k for k, v in idalabel.items()}
__a : List[Any] = FocalNetConfig(
embed_dim=_lowerCamelCase , depths=_lowerCamelCase , focal_levels=_lowerCamelCase , focal_windows=_lowerCamelCase , use_conv_embed=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase , use_post_layernorm=_lowerCamelCase , use_layerscale=_lowerCamelCase , )
return config
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
if "patch_embed.proj" in name:
__a : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__a : Tuple = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__a : List[str] = """encoder.""" + name
if "encoder.layers" in name:
__a : str = name.replace("""encoder.layers""" , """encoder.stages""" )
if "downsample.proj" in name:
__a : int = name.replace("""downsample.proj""" , """downsample.projection""" )
if "blocks" in name:
__a : List[str] = name.replace("""blocks""" , """layers""" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__a : List[Any] = name.replace("""modulation.f""" , """modulation.projection_in""" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__a : List[Any] = name.replace("""modulation.h""" , """modulation.projection_context""" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__a : Optional[Any] = name.replace("""modulation.proj""" , """modulation.projection_out""" )
if name == "norm.weight":
__a : Tuple = """layernorm.weight"""
if name == "norm.bias":
__a : Dict = """layernorm.bias"""
if "head" in name:
__a : str = name.replace("""head""" , """classifier""" )
else:
__a : Union[str, Any] = """focalnet.""" + name
return name
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Any=False ):
# fmt: off
__a : Dict = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
__a : str = model_name_to_url[model_name]
print("""Checkpoint URL: """ , _lowerCamelCase )
__a : int = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="""cpu""" )["""model"""]
# rename keys
for key in state_dict.copy().keys():
__a : List[Any] = state_dict.pop(_lowerCamelCase )
__a : List[Any] = val
__a : str = get_focalnet_config(_lowerCamelCase )
__a : Dict = FocalNetForImageClassification(_lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(_lowerCamelCase )
# verify conversion
__a : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__a : Dict = BitImageProcessor(
do_resize=_lowerCamelCase , size={"""shortest_edge""": 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCamelCase , crop_size=2_2_4 , do_normalize=_lowerCamelCase , image_mean=_lowerCamelCase , image_std=_lowerCamelCase , )
__a : str = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
__a : List[Any] = processor(images=_lowerCamelCase , return_tensors="""pt""" )
__a : int = transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__a : List[str] = image_transforms(_lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _lowerCamelCase , atol=1E-4 )
__a : List[str] = model(**_lowerCamelCase )
__a : Optional[Any] = outputs.logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
print("""First values of logits:""" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__a : Dict = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__a : Any = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__a : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__a : Dict = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__a : Union[str, Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__a : List[str] = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
lowercase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 63 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf configuration file.

    Restored from the mangled original, whose two parameters shared one name
    (a SyntaxError). The name ``load_config`` is what the other helpers in
    this file call.

    Args:
        config_path: Path of the YAML config file to load.
        display: If True, pretty-print the loaded config as YAML.

    Returns:
        The loaded ``OmegaConf`` config object.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Instantiate a ``VQModel`` from a config/checkpoint pair and move it to ``device``.

    Args:
        device: Target device, also used as ``map_location`` for ``torch.load``.
        conf_path: Optional path to the YAML config; defaults to the vqgan-only config.
        ckpt_path: Optional path to the checkpoint; defaults to the vqgan-only weights.

    Returns:
        The loaded ``VQModel`` on ``device``.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints wrap the weights in a "state_dict" entry.
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    # NOTE(review): the strict flag was mangled in the original; strict=True
    # (the torch default) is assumed here — confirm against the upstream script.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd  # free the CPU copy of the weights
    return model
def reconstruct_with_vqgan(x, model):
    """Round-trip ``x`` through the VQGAN encoder/decoder and return the reconstruction.

    Args:
        x: Input image batch (whatever ``model.encode`` accepts).
        model: A VQGAN-style model exposing ``encode``/``decode``.

    Returns:
        The decoded reconstruction of ``x``.
    """
    # encode returns (latent, emb_loss, info); only the latent is needed here.
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Name"`` to the named attribute.

    Restored from the mangled original, which had duplicate parameter names
    and referenced the undefined names ``string`` and ``cls``.

    Args:
        string: Dotted path; everything before the last dot is the module.
        reload: If True, reload the module before resolving the attribute.

    Returns:
        The attribute named by the last path component.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Build the object described by ``config["target"]`` with ``config["params"]``.

    Args:
        config: Mapping with a required dotted-path ``"target"`` key and an
            optional ``"params"`` dict of constructor keyword arguments.

    Raises:
        KeyError: If ``"target"`` is missing.

    Returns:
        The instantiated object.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from ``config`` and optionally load weights / move to GPU.

    Args:
        config: Config mapping accepted by ``instantiate_from_config``.
        sd: State dict to load, or None to keep the fresh initialisation.
        gpu: If True, move the model to CUDA.
        eval_mode: If True, put the model into eval mode.

    Returns:
        ``{"model": model}`` (dict form kept for caller compatibility).
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load a model plus its training step counter from a Lightning checkpoint.

    Args:
        config: Config object whose ``.model`` entry describes the model.
        ckpt: Checkpoint path, or a falsy value to build an uninitialised model.
        gpu: Forwarded to ``load_model_from_config``.
        eval_mode: Forwarded to ``load_model_from_config``.

    Returns:
        Tuple ``(model, global_step)``; ``global_step`` is None without a checkpoint.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 63 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase__ = logging.get_logger(__name__)
# NOTE(review): both module constants were mangled to the same name, so this
# map rebinds ``lowercase__`` and clobbers the logger above; upstream this is
# presumably the pretrained-config archive map — confirm against the original.
lowercase__ = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) image model.

    Stores the per-stage patch-embedding sizes/strides, channel widths,
    depths and MLP ratios, plus the usual activation / init / regularisation
    hyper-parameters. Fixes two obfuscation defects: the base class was the
    undefined name ``__snake_case`` (``PretrainedConfig`` is imported above),
    and every ``__init__`` parameter shared the name ``_lowercase`` — a
    SyntaxError. Parameter names are restored from the attribute assignments.
    """

    # ``model_type`` is the key PretrainedConfig uses for auto-class lookup.
    model_type = "van"

    def __init__(
        self,
        image_size=224,  # input resolution (square)
        num_channels=3,  # input image channels
        patch_sizes=[7, 3, 3, 3],  # patch-embedding kernel size per stage
        strides=[4, 2, 2, 2],  # patch-embedding stride per stage
        hidden_sizes=[64, 128, 320, 512],  # channel width per stage
        depths=[3, 3, 12, 3],  # number of blocks per stage
        mlp_ratios=[8, 8, 4, 4],  # MLP expansion ratio per stage
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public names it provides.
# Fix: every structure assignment had been mangled to ``lowercase__`` while the
# final ``_LazyModule(...)`` call still referenced the undefined name
# ``_import_structure``, and the lazy module was never installed in
# ``sys.modules`` (so the lazy-import machinery did nothing).
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

# Each optional backend only registers its entries when it is installed; a
# missing backend simply leaves the corresponding submodule out.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# NOTE(review): these three module constants were all mangled to the same name,
# so each assignment clobbers the previous one; they were presumably three
# distinct constants (repo id, cache directory, commit sha) — confirm upstream.
lowercase__ = "hf-internal-testing/tiny-random-bert"
lowercase__ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowercase__ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
# Tests for the hub file-caching helpers (cached_file / has_file /
# get_file_from_repo).
# NOTE(review): every test method below was mangled to the same name
# ``lowerCAmelCase__`` (so at runtime only the last definition survives) and
# most arguments were mangled to ``_lowercase``, which is undefined in this
# file — the original argument values cannot be recovered from this fragment,
# so the code is kept byte-identical and only documented.
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def lowerCAmelCase__(self ):
        '''Download a file via cached_file and verify the cache layout and cache hits.'''
        __a : Union[str, Any] = cached_file(_lowercase , _lowercase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(_lowercase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(_lowercase , _lowercase ) ) )
        with open(os.path.join(_lowercase , """refs""" , """main""" ) ) as f:
            __a : Any = f.read()
        self.assertEqual(_lowercase , os.path.join(_lowercase , """snapshots""" , _lowercase , _lowercase ) )
        self.assertTrue(os.path.isfile(_lowercase ) )
        # File is cached at the same place the second time.
        __a : List[Any] = cached_file(_lowercase , _lowercase )
        self.assertEqual(_lowercase , _lowercase )
        # Using a specific revision to test the full commit hash.
        __a : List[Any] = cached_file(_lowercase , _lowercase , revision="""9b8c223""" )
        self.assertEqual(_lowercase , os.path.join(_lowercase , """snapshots""" , _lowercase , _lowercase ) )
    def lowerCAmelCase__(self ):
        '''Invalid repo ids, revisions and filenames raise with descriptive messages.'''
        with self.assertRaisesRegex(_lowercase , """is not a valid model identifier""" ):
            __a : Union[str, Any] = cached_file("""tiny-random-bert""" , _lowercase )
        with self.assertRaisesRegex(_lowercase , """is not a valid git identifier""" ):
            __a : Tuple = cached_file(_lowercase , _lowercase , revision="""aaaa""" )
        with self.assertRaisesRegex(_lowercase , """does not appear to have a file named""" ):
            __a : str = cached_file(_lowercase , """conf""" )
    def lowerCAmelCase__(self ):
        '''Missing entries and connection errors return None when exceptions are suppressed.'''
        with self.assertRaisesRegex(_lowercase , """does not appear to have a file named""" ):
            __a : Any = cached_file(_lowercase , """conf""" )
        with open(os.path.join(_lowercase , """refs""" , """main""" ) ) as f:
            __a : Any = f.read()
        # A negative lookup is recorded under the ``.no_exist`` folder.
        self.assertTrue(os.path.isfile(os.path.join(_lowercase , """.no_exist""" , _lowercase , """conf""" ) ) )
        __a : List[Any] = cached_file(_lowercase , """conf""" , _raise_exceptions_for_missing_entries=_lowercase )
        self.assertIsNone(_lowercase )
        __a : str = cached_file(_lowercase , """conf""" , local_files_only=_lowercase , _raise_exceptions_for_missing_entries=_lowercase )
        self.assertIsNone(_lowercase )
        # Build a fake 500 response to simulate a hub outage.
        __a : List[Any] = mock.Mock()
        __a : List[Any] = 500
        __a : int = {}
        __a : Any = HTTPError
        __a : Union[str, Any] = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=_lowercase ) as mock_head:
            __a : Union[str, Any] = cached_file(_lowercase , """conf""" , _raise_exceptions_for_connection_errors=_lowercase )
            self.assertIsNone(_lowercase )
            # This check we did call the fake head request
            mock_head.assert_called()
    def lowerCAmelCase__(self ):
        '''has_file reports which framework weight files exist in a repo.'''
        self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _lowercase ) )
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _lowercase ) )
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _lowercase ) )
    def lowerCAmelCase__(self ):
        '''get_file_from_repo returns None for missing files and validates repo/revision.'''
        self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(_lowercase , """is not a valid model identifier""" ):
            get_file_from_repo("""bert-base-case""" , _lowercase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(_lowercase , """is not a valid git identifier""" ):
            get_file_from_repo("""bert-base-cased""" , _lowercase , revision="""ahaha""" )
        __a : str = get_file_from_repo("""bert-base-cased""" , _lowercase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        __a : Union[str, Any] = json.loads(open(_lowercase , """r""" ).read() )
        self.assertEqual(config["""hidden_size"""] , 768 )
    def lowerCAmelCase__(self ):
        '''A local directory path works as the repo argument of get_file_from_repo.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            __a : Any = Path(_lowercase ) / """a.txt"""
            filename.touch()
            self.assertEqual(get_file_from_repo(_lowercase , """a.txt""" ) , str(_lowercase ) )
            self.assertIsNone(get_file_from_repo(_lowercase , """b.txt""" ) )
| 63 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# NOTE(review): this map rebinds ``lowercase__`` and clobbers the logger above
# — both constants were mangled to the same name; upstream this is presumably
# the UniSpeech pretrained-config archive map. Confirm against the original.
lowercase__ = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration class for a UniSpeech speech encoder.

    Holds the feature-extractor convolution specs, transformer sizes and
    dropouts, SpecAugment masking settings, quantizer/pretraining parameters
    and CTC loss options. Fixes two obfuscation defects: the base class was
    the undefined name ``__snake_case`` (``PretrainedConfig`` is imported
    above), and every ``__init__`` parameter shared the name ``_lowercase`` —
    a SyntaxError. Parameter names are restored from the ordered attribute
    assignments in the body.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self):
        """Ratio of raw input samples to encoder output frames (product of conv strides)."""
        # NOTE(review): property name kept from the mangled source to preserve
        # the external interface; upstream this is ``inputs_to_logits_ratio``.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public names it provides.
# Fix: the per-backend list assignments had been mangled to ``lowercase__``
# while the final ``_LazyModule(...)`` call still referenced the undefined
# name ``_import_structure``, and the lazy module was never installed in
# ``sys.modules``.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

# Each optional backend only registers its entries when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
# Helper that builds tiny EfficientFormer configs and random inputs for the
# TF model tests below.
# NOTE(review): all ``__init__`` parameters were mangled to the single name
# ``_lowercase`` — duplicate parameter names are a SyntaxError, so this
# fragment cannot run as-is. The intended parameter names survive in the
# attribute assignments; the code is kept byte-identical and only documented.
class SCREAMING_SNAKE_CASE__ :
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        '''Store the tiny-model hyper-parameters consumed by the tests.'''
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        # presumably the sequence length is embed_dim + 1 (patch tokens + cls) —
        # TODO confirm against the upstream test file.
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        '''Create random pixel values (and labels when enabled) plus a config.'''
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        '''Build an EfficientFormerConfig from the stored hyper-parameters.'''
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Run the base model and check the output hidden-state shape.'''
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Run the classification head, incl. a greyscale (1-channel) variant.'''
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        '''Return (config, inputs_dict) in the shape the common tests expect.'''
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
# Common-test suite for the TF EfficientFormer models.
# NOTE(review): the mixin bases were mangled to the undefined name
# ``__snake_case`` (presumably TFModelTesterMixin and PipelineTesterMixin,
# which are imported above), all class attributes share the mangled name
# ``_lowerCAmelCase`` (later assignments clobber earlier ones) and all test
# methods share the name ``lowerCAmelCase__`` — only the last definition of
# each survives at runtime. The code is kept byte-identical and documented.
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
    _lowerCAmelCase = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    _lowerCAmelCase = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        '''Instantiate the model tester and the config tester.'''
        __a : Tuple = TFEfficientFormerModelTester(self )
        __a : Any = ConfigTester(
            self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
    def lowerCAmelCase__(self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def lowerCAmelCase__(self ):
        '''Skipped: the model has no inputs_embeds path.'''
        pass
    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def lowerCAmelCase__(self ):
        '''Skipped: the model exposes no input/output embeddings.'''
        pass
    def lowerCAmelCase__(self ):
        '''The forward signature should start with ``pixel_values``.'''
        __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = model_class(_lowercase )
            __a : Optional[Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Optional[Any] = [*signature.parameters.keys()]
            __a : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowercase )
    def lowerCAmelCase__(self ):
        '''Hidden states should have the expected count and trailing shape.'''
        def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
            __a : Tuple = model_class(_lowercase )
            __a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __a : str = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(_lowercase ) , _lowercase )
            if hasattr(self.model_tester , """encoder_seq_length""" ):
                __a : Any = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    __a : int = seq_length * self.model_tester.chunk_length
            else:
                __a : Any = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __a : Optional[int] = outputs.decoder_hidden_states
                # NOTE(review): ``asseretIsInstance`` is a typo for
                # ``assertIsInstance`` in the original file; this branch is
                # unreachable for EfficientFormer (not an encoder-decoder).
                self.asseretIsInstance(_lowercase , (list, tuple) )
                self.assertEqual(len(_lowercase ) , _lowercase )
                __a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
                __a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a : int = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
        '''Drop labels for the teacher-distillation class, which takes none.'''
        __a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def lowerCAmelCase__(self ):
        '''Exercise the base model via the model tester.'''
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )
    @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def lowerCAmelCase__(self ):
        '''Skipped: no masked-image-modeling head exists yet.'''
        __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
    def lowerCAmelCase__(self ):
        '''Exercise the image-classification head via the model tester.'''
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowercase )
    @slow
    def lowerCAmelCase__(self ):
        '''Loading the first pretrained checkpoint should succeed.'''
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )
    def lowerCAmelCase__(self ):
        '''Attention outputs should have the expected count and shapes.'''
        __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __a : int = True
        __a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
        __a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            __a : List[str] = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            __a : List[Any] = True
            __a : Tuple = False
            __a : List[Any] = True
            __a : int = model_class(_lowercase )
            __a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a : Optional[Any] = True
            __a : List[str] = model_class(_lowercase )
            __a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def lowerCAmelCase__(self ):
        '''Models should build from maximally-general (None-shaped) Keras inputs.'''
        __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __a : Dict = model_class(_lowercase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __a : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __a : Optional[Any] = model(_lowercase )
            self.assertTrue(outputs_dict is not None )
def prepare_img():
    """Load the standard COCO cats test image used by the integration tests.

    Fix: the function had been renamed to ``__magic_name__`` while the
    integration tests below still call it as ``prepare_img``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
# Slow integration tests checking real checkpoints against reference logits.
# NOTE(review): both test methods were mangled to the same name
# ``lowerCAmelCase__`` (only the last survives at runtime); code kept
# byte-identical and only documented.
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    @cached_property
    def lowerCAmelCase__(self ):
        '''Image processor for the l1-300 checkpoint (None without vision deps).'''
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCAmelCase__(self ):
        '''Classification checkpoint should reproduce the reference logits.'''
        __a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __a : Optional[Any] = self.default_image_processor
        __a : List[str] = prepare_img()
        __a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : Optional[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : str = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
    @slow
    def lowerCAmelCase__(self ):
        '''Teacher-distillation checkpoint should reproduce the reference logits.'''
        __a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __a : Any = self.default_image_processor
        __a : str = prepare_img()
        __a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : List[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : int = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 1 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(Enum):
    """Names of the supported learning-rate schedule types.

    Fixes two obfuscation defects: the base class was the undefined name
    ``__snake_case`` (``Enum`` is imported above), and all members shared one
    mangled attribute name, which is invalid for an enum (reused keys raise).
    Member names are restored from their string values.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate.

    Restored from the mangled original, whose parameters shared one name
    (a SyntaxError).

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        A ``LambdaLR`` whose multiplier is always 1.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of steps over which the rate ramps 0 -> 1.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        A ``LambdaLR``: multiplier ``step / num_warmup_steps`` during warmup,
        then 1.0 forever after.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # max(1.0, ...) guards against division by zero when warmup is 0.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant learning-rate multiplier parsed from a rule string.

    ``step_rules`` looks like ``"100:0.5,200:0.1,0.01"``: multiplier 0.5 until
    step 100, then 0.1 until step 200, then 0.01 from there on. Restored from
    the mangled original, where the split results were clobbered into a single
    variable and the rule lookup referenced undefined names.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        step_rules: Comma-separated ``step:value`` pairs plus a final value.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        A ``LambdaLR`` applying the parsed step function.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # All entries except the last are "threshold:multiplier" pairs.
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        rules_dict[int(steps_str)] = float(value_str)
    # The trailing entry is the multiplier used after the last threshold.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup from 0, then linear decay to 0 at ``num_training_steps``.

    Args:
        optimizer: optimizer whose learning rate is scheduled.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: total steps; multiplier reaches 0 here.
        last_epoch: index of the last epoch when resuming training.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay, clamped at 0 after training ends.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay to 0.

    With the default ``num_cycles=0.5`` the multiplier follows half a cosine
    wave from 1 down to 0 over the post-warmup steps.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the decay phase completed, in [0, 1].
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then ``num_cycles`` cosine decays with hard restarts.

    Each cycle restarts the multiplier at 1 and decays it back to 0; after
    ``num_training_steps`` the multiplier stays at 0.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the top of every cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the base LR down to ``lr_end``.

    Args:
        optimizer: optimizer whose learning rate is scheduled; its default LR
            is read as the initial value.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: step at which the LR reaches ``lr_end``.
        lr_end: final learning rate (absolute, not a multiplier).
        power: polynomial exponent; 1.0 gives a linear decay.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if the optimizer's initial LR is not above ``lr_end``.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Registry mapping each SchedulerType member to its factory function.
# NOTE(review): the obfuscated file defines all the factories above under the
# single name `__magic_name__` and the enum members under `_lowerCAmelCase`,
# so the identifiers referenced here resolve only once those definitions are
# restored to the canonical names used below.
lowercase__ = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, "SchedulerType"],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any supported scheduler by name.

    The obfuscated original named all eight parameters ``_lowerCamelCase``
    (a SyntaxError); the signature is restored from the keyword arguments it
    forwards below.

    Args:
        name: scheduler type (string value or ``SchedulerType`` member).
        optimizer: optimizer whose learning rate is scheduled.
        step_rules: rule string, only for ``PIECEWISE_CONSTANT``.
        num_warmup_steps: required by every warmup-based schedule.
        num_training_steps: required by the decaying schedules.
        num_cycles: only used by ``COSINE_WITH_RESTARTS``.
        power: only used by ``POLYNOMIAL``.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 63 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution obtained by applying ``y = loc + scale * x`` to a base distribution.

    Names restored from obfuscation: this class is instantiated as
    ``AffineTransformed(...)`` by ``DistributionOutput.distribution`` later in
    this file, and the base class is the otherwise-unused
    ``TransformedDistribution`` import, whose ``__init__(base, transforms)``
    signature matches the ``super().__init__`` call below.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        # Default to the identity transform when loc/scale are omitted.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance scales by ``scale ** 2``; ``loc`` does not affect it."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of ``variance``)."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project network features into one linear head per distribution argument.

    Names restored from obfuscation: ``DistributionOutput.get_parameter_projection``
    below constructs this class as ``ParameterProjection(in_features=...,
    args_dim=..., domain_map=...)``; the original collapsed all three
    parameters onto one identifier (a SyntaxError).
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One Linear head per argument, sized by that argument's dimension.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        """Apply every head to ``x`` and constrain the results via ``domain_map``."""
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module``.

    Name restored from obfuscation: ``DistributionOutput.get_parameter_projection``
    below wraps ``self.domain_map`` as ``LambdaLayer(self.domain_map)``.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        """Apply the wrapped callable to ``x`` and any extra arguments."""
        return self.function(x, *args)
class DistributionOutput:
    """Base class mapping network outputs to the parameters of a distribution.

    Method/attribute names restored from obfuscation; the restorations are
    grounded by in-file references (``cls.squareplus`` in the subclasses,
    ``self.domain_map`` / ``self.event_dim`` / ``self.event_shape`` in this
    class's own bodies). Subclasses set ``distribution_class`` and
    ``args_dim`` and implement ``domain_map``.
    """

    # Class-level contract filled in by subclasses (the original replaced
    # these annotation-only declarations with `= 42`).
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Each parameter head is replicated `dim` times for multivariate output.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        """Instantiate the raw distribution; wrap in Independent when dim > 1."""
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Build the output distribution, optionally affine-transformed by loc/scale."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of one event: scalar for dim == 1, else (dim,)."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (0 or 1)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int):
        """Return the projection module mapping features to distribution args."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args):
        """Constrain raw projections to the distribution's parameter domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x):
        """Smooth positive map: (x + sqrt(x^2 + 4)) / 2, an alternative to softplus."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """``DistributionOutput`` for a Student's t distribution.

    NOTE(review): class and base names restored from obfuscation (the
    original inherited the undefined alias ``__snake_case``); the
    ``domain_map`` parameter names follow the ``args_dim`` keys.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        """Map raw projections into valid Student-t parameters."""
        # Positivity via squareplus; clamp scale away from zero and keep df > 2.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """``DistributionOutput`` for a Normal distribution.

    NOTE(review): class and base names restored from obfuscation (the
    original inherited the undefined alias ``__snake_case``).
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        """Map raw projections into valid Normal parameters (scale > 0)."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """``DistributionOutput`` for a negative-binomial (count) distribution.

    NOTE(review): class and base names restored from obfuscation (the
    original inherited the undefined alias ``__snake_case``); locals in the
    overrides were assigned to ``__a`` while being read as
    ``total_count`` / ``logits``.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        """Constrain ``total_count`` to be positive; logits stay unconstrained."""
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        """Build the (possibly Independent-wrapped) NegativeBinomial by keyword."""
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Apply scaling by shifting logits; ``loc`` is unused for count data."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 63 | 1 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the project-local ``knapsack`` helper imported as ``k``.

    NOTE(review): obfuscation damage — all three test methods share the name
    ``lowerCAmelCase__`` (only the last definition survives on the class and
    ``unittest`` discovers no ``test_*`` methods), and each body calls
    ``len(_lowercase)`` / ``k.knapsack(_lowercase, ...)`` where ``_lowercase``
    is undefined; presumably the originals passed the capacity, value list,
    weight list and item count — confirm against the ``knapsack`` module.
    """

    def lowerCAmelCase__(self ):
        """Degenerate cases: zero capacity / trivial items give profit 0."""
        __a : str = 0
        __a : Optional[Any] = [0]
        __a : int = [0]
        __a : str = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
        __a : int = [60]
        __a : Union[str, Any] = [10]
        __a : Tuple = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )

    def lowerCAmelCase__(self ):
        """Small instance: capacity 3 with three items, expected profit 5."""
        __a : int = 3
        __a : str = [1, 2, 3]
        __a : Optional[Any] = [3, 2, 1]
        __a : int = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 )

    def lowerCAmelCase__(self ):
        """Classic textbook instance: capacity 50, expected profit 220."""
        __a : Dict = 50
        __a : Tuple = [60, 100, 120]
        __a : List[str] = [10, 20, 30]
        __a : Union[str, Any] = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 220 )


if __name__ == "__main__":
    unittest.main()
| 63 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast unit tests for ``KandinskyVaaPriorPipeline`` built from tiny dummy models.

    NOTE(review): obfuscation damage — every class attribute below is named
    ``_lowerCAmelCase`` (only the last assignment survives) and every
    method/property is named ``lowerCAmelCase__`` (each definition shadows
    the previous one), yet the bodies still read the original identifiers
    (``self.pipeline_class``, ``self.time_input_dim``, ``self.dummy_prior``,
    ``self.get_dummy_components`` ...). Restore the original names before
    relying on this class; as written, ``unittest`` also discovers no
    ``test_*`` methods.
    """

    # Originally: pipeline_class, params, batch_params,
    # required_optional_params, test_xformers_attention.
    _lowerCAmelCase = KandinskyVaaPriorPipeline
    _lowerCAmelCase = ["prompt"]
    _lowerCAmelCase = ["prompt", "negative_prompt"]
    _lowerCAmelCase = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase = False

    @property
    def lowerCAmelCase__(self ):
        """Hidden size of the dummy text embedder (originally ``text_embedder_hidden_size``)."""
        return 32

    @property
    def lowerCAmelCase__(self ):
        """Timestep-embedding input width (originally ``time_input_dim``)."""
        return 32

    @property
    def lowerCAmelCase__(self ):
        """Mirrors ``time_input_dim`` (originally ``block_out_channels_0``)."""
        return self.time_input_dim

    @property
    def lowerCAmelCase__(self ):
        """Time-embedding width, 4x the input width (originally ``time_embed_dim``)."""
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase__(self ):
        """Originally ``cross_attention_dim``."""
        return 100

    @property
    def lowerCAmelCase__(self ):
        """Tiny CLIP tokenizer fixture (originally ``dummy_tokenizer``)."""
        __a : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def lowerCAmelCase__(self ):
        """Tiny CLIP text encoder with projection (originally ``dummy_text_encoder``)."""
        torch.manual_seed(0 )
        __a : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_lowercase )

    @property
    def lowerCAmelCase__(self ):
        """Tiny prior transformer fixture (originally ``dummy_prior``)."""
        torch.manual_seed(0 )
        __a : Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        __a : Tuple = PriorTransformer(**_lowercase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __a : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def lowerCAmelCase__(self ):
        """Tiny CLIP vision encoder with projection (originally ``dummy_image_encoder``)."""
        torch.manual_seed(0 )
        __a : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __a : Optional[Any] = CLIPVisionModelWithProjection(_lowercase )
        return model

    @property
    def lowerCAmelCase__(self ):
        """CLIP image processor fixture (originally ``dummy_image_processor``)."""
        __a : Optional[Any] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_lowercase , do_normalize=_lowercase , do_resize=_lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor

    def lowerCAmelCase__(self ):
        """Assemble the full component dict for the pipeline (originally ``get_dummy_components``)."""
        __a : Union[str, Any] = self.dummy_prior
        __a : int = self.dummy_image_encoder
        __a : Any = self.dummy_text_encoder
        __a : int = self.dummy_tokenizer
        __a : Optional[Any] = self.dummy_image_processor
        __a : List[Any] = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=10.0 , )
        __a : List[Any] = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components

    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        """Deterministic call kwargs for the pipeline (originally ``get_dummy_inputs(device, seed)``)."""
        if str(_lowercase ).startswith("""mps""" ):
            # MPS generators do not accept an explicit device argument.
            __a : Dict = torch.manual_seed(_lowercase )
        else:
            __a : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Union[str, Any] = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    def lowerCAmelCase__(self ):
        """Smoke test: CPU run produces image embeds matching a pinned slice."""
        __a : Union[str, Any] = """cpu"""
        __a : Union[str, Any] = self.get_dummy_components()
        __a : Dict = self.pipeline_class(**_lowercase )
        __a : Tuple = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
        __a : str = output.image_embeds
        __a : Any = pipe(
            **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
        __a : List[Any] = image[0, -10:]
        __a : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __a : Optional[Any] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def lowerCAmelCase__(self ):
        """Batch-of-one equivalence check (mixin ``_test_inference_batch_single_identical``)."""
        __a : Any = torch_device == """cpu"""
        __a : Any = True
        __a : Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=_lowercase , relax_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )

    @skip_mps
    def lowerCAmelCase__(self ):
        """Attention-slicing forward-pass check (mixin ``_test_attention_slicing_forward_pass``)."""
        __a : Optional[int] = torch_device == """cpu"""
        __a : Union[str, Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
| 63 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class SCREAMING_SNAKE_CASE__ ( TaskTemplate ):
    """Task template for extractive question answering.

    Field and base-class names restored from obfuscation: the property at the
    bottom reads ``self.question_column`` / ``self.context_column`` /
    ``self.answers_column``, and the otherwise-unused ``TaskTemplate`` import
    is the intended base class. ``frozen=True`` replaces the undefined
    ``frozen=__snake_case`` — presumably the original literal; confirm.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def lowerCAmelCase__(self ) -> Dict[str, str]:
        """Map the configured dataset column names onto the canonical QA schema names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 63 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Tokenization tests for the LED tokenizer (slow and fast variants).

    NOTE(review): obfuscation damage — all methods share the name
    ``lowerCAmelCase__`` (later definitions shadow earlier ones and
    ``unittest`` discovers no ``test_*`` methods), while the bodies still
    read the original identifiers (``self.vocab_file``,
    ``self.default_tokenizer``, ``self.special_tokens_map`` ...). Restore
    the original names before relying on this class.
    """

    # Originally: tokenizer_class, rust_tokenizer_class, test_rust_tokenizer.
    _lowerCAmelCase = LEDTokenizer
    _lowerCAmelCase = LEDTokenizerFast
    _lowerCAmelCase = True

    def lowerCAmelCase__(self ):
        """Write a tiny BPE vocab/merges pair into ``tmpdirname`` (originally ``setUp``)."""
        super().setUp()
        __a : str = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        __a : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __a : List[Any] = {"""unk_token""": """<unk>"""}
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowercase ) )

    def lowerCAmelCase__(self , **_lowercase ):
        """Instantiate the slow tokenizer from ``tmpdirname`` (originally ``get_tokenizer``)."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def lowerCAmelCase__(self , **_lowercase ):
        """Instantiate the fast tokenizer from ``tmpdirname`` (originally ``get_rust_tokenizer``)."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def lowerCAmelCase__(self , _lowercase ):
        """Sample input/output text pair (originally ``get_input_output_texts``)."""
        return "lower newer", "lower newer"

    @cached_property
    def lowerCAmelCase__(self ):
        """Pretrained slow tokenizer (originally ``default_tokenizer``)."""
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )

    @cached_property
    def lowerCAmelCase__(self ):
        """Pretrained fast tokenizer (originally ``default_tokenizer_fast``)."""
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )

    @require_torch
    def lowerCAmelCase__(self ):
        """Batched encoding returns the expected shapes and token ids."""
        __a : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __a : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            __a : Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowercase , _lowercase )

    @require_torch
    def lowerCAmelCase__(self ):
        """Encoding without targets must not emit label/decoder keys."""
        __a : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , _lowercase )
            self.assertIn("""attention_mask""" , _lowercase )
            self.assertNotIn("""labels""" , _lowercase )
            self.assertNotIn("""decoder_attention_mask""" , _lowercase )

    @require_torch
    def lowerCAmelCase__(self ):
        """Target texts are padded to the requested max_length."""
        __a : Optional[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Dict = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    @require_torch
    def lowerCAmelCase__(self ):
        """Very long input is truncated to the model's 5122-token limit."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )

    @require_torch
    def lowerCAmelCase__(self ):
        """Inputs and targets both start with BOS and end with EOS."""
        __a : Tuple = ["""A long paragraph for summarization."""]
        __a : Dict = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : int = tokenizer(_lowercase , return_tensors="""pt""" )
            __a : Dict = tokenizer(text_target=_lowercase , return_tensors="""pt""" )
            __a : List[str] = inputs["""input_ids"""]
            __a : List[Any] = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def lowerCAmelCase__(self ):
        """``pad`` preserves a caller-provided global_attention_mask."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[Any] = ["""Summary of the text.""", """Another summary."""]
            __a : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __a : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
            __a : Tuple = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
            __a : Union[str, Any] = tokenizer.pad(_lowercase )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , _lowercase )

    def lowerCAmelCase__(self ):
        """Intentionally skipped placeholder (originally a pretokenized-inputs test)."""
        pass

    def lowerCAmelCase__(self ):
        """Slow and fast tokenizers agree on text containing special tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = """A, <mask> AllenNLP sentence."""
                __a : Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                __a : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 | 1 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS-84 ellipsoid constants (metres). The obfuscated original bound all
# three to the single name `lowercase__`, while the function below reads
# them as AXIS_A / AXIS_B / RADIUS.
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the haversine (great-circle) distance in metres between two points.

    Renamed from the obfuscated ``__magic_name__``; latitudes are corrected
    from geodetic to parametric using the WGS-84 flattening before the
    spherical haversine formula is applied.

    Args:
        lat1, lon1: first point in decimal degrees.
        lat2, lon2: second point in decimal degrees.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
    import doctest

    # No doctests are defined in this module yet, so this currently passes trivially.
    doctest.testmod()
| 63 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # NOTE(review): obfuscated aliases restored — the original bound the
    # parser, the parsed args and the converted model to the single name
    # `lowercase__` while the rest of the script read `parser`, `args` and
    # `controlnet`, and defined the bool converter as `__magic_name__` while
    # registering it as `parse_bool`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        """Parse the literal strings 'True'/'False' into booleans; reject anything else."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 1 |
"""simple docstring"""
class Graph:
    """Directed graph on an adjacency-list dict, with recursive depth-first search.

    Renamed from the obfuscated ``SCREAMING_SNAKE_CASE__``: the demo block
    under ``if __name__ == "__main__"`` below instantiates ``Graph()`` and
    drives ``add_edge`` / ``print_graph`` / ``dfs``, which in the original
    were all collapsed onto the single method name ``lowerCAmelCase__``.
    """

    def __init__(self):
        # vertex -> list of successor vertices
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then one 'v -> successors' line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add the directed edge from_vertex -> to_vertex."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run DFS from every unvisited vertex; assumes vertices are labelled 0..n-1."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """Mark and print start_vertex, then recurse into every unvisited vertex.

        NOTE(review): as in the original, this iterates over ALL vertices
        rather than the successors of start_vertex, which yields the vertices
        in ascending label order.
        """
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    # Demo: build the sample graph, show its adjacency, then run DFS.
    # NOTE(review): obfuscation damage — the graph is assigned to
    # `lowercase__` but every call below reads `g`, and the class's methods
    # were all renamed to `lowerCAmelCase__`; restore `g = Graph()` and the
    # method names for this demo to run.
    lowercase__ = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__ ( DiffusionPipeline ):
    """Minimal custom pipeline: one denoising step whose output is all ones.

    The base class is the otherwise-unused ``DiffusionPipeline`` import (the
    obfuscated original inherited the undefined alias ``__snake_case``), and
    the ``__init__`` parameter names are restored from the
    ``register_modules`` keywords.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Start from random noise shaped like a single model sample.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output == 0, so the result is
        # deterministically a tensor of ones (shaped like the model output —
        # the original's `torch.ones_like(_lowercase)` operand is ambiguous;
        # confirm against the upstream test pipeline).
        result = scheduler_output - scheduler_output + torch.ones_like(model_output)
        return result
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
# Type alias for a 9x9 sudoku board. The obfuscated original bound all three
# of these module constants to the single name `lowercase__`; the names below
# are the ones the functions and demo code in this file actually read.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column).

    Renamed from the obfuscated ``__magic_name__`` — ``sudoku`` below calls
    it as ``is_safe``. The original also named all four parameters
    ``_lowerCamelCase`` (a SyntaxError).
    """
    # Reject if n already occurs in the row or column.
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # Reject if n already occurs in the enclosing 3x3 box.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell (value 0), scanning
    row-major; return None when the grid is full. (Renamed from an anonymized
    placeholder: the solver calls ``find_empty_location``.)
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (mutated) grid on success, or None when no digit fits the
    first empty cell under sudoku rules. (Renamed from an anonymized
    placeholder: the main block calls ``sudoku``.)
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line, cells separated by single spaces.

    (Renamed from an anonymized placeholder: the main block calls
    ``print_solution``.)
    """
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        # Review fix: the result was bound to a placeholder name while the
        # branch below read `solution`.
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> remote config URL, used by `from_pretrained`.
# NOTE(review): both names below were anonymized; restored to the
# conventional HF names — confirm against the package's __init__.
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration class storing ViT-MSN architecture hyper-parameters.

    Review fixes: the constructor previously declared every parameter with the
    same placeholder name (a ``SyntaxError``); parameters are restored to the
    conventional ViT configuration names, the ``model_type`` attribute is
    restored, and the base class is the imported ``PretrainedConfig``.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        """Store all hyper-parameters as attributes; extras go to the base class."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the Reformer package: submodules are registered
# in _import_structure and only imported when first accessed, and only when
# their optional backend is installed.
# Review fix: every assignment below went to the same placeholder name while
# the _LazyModule call at the bottom read `_import_structure` (NameError);
# the per-backend entries and the `sys.modules` swap are restored.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
# Module logger (restored from an anonymized placeholder name).
logger = logging.get_logger(__name__)
# Review fix: all eleven constants below were bound to one placeholder name,
# each assignment overwriting the previous one, while the tokenizer classes
# later in this module read the conventional DPR names restored here.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

# Default tokenizer init kwargs per checkpoint.
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__(BertTokenizerFast):
    """Fast DPR context-encoder tokenizer (BertTokenizerFast with DPR presets).

    Review fix: the five class attributes were all assigned to one placeholder
    name, each overwriting the previous; restored to the attribute names the
    tokenizer base class reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__(BertTokenizerFast):
    """Fast DPR question-encoder tokenizer (BertTokenizerFast with DPR presets).

    Review fix: the five class attributes were all assigned to one placeholder
    name, each overwriting the previous; restored to the attribute names the
    tokenizer base class reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# One extracted answer span plus its scores (consumed by decode_best_spans).
# Review fix: both namedtuples were bound to one placeholder name while the
# reader mixin reads DPRSpanPrediction / DPRReaderOutput.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

# Raw reader model outputs: per-token span logits and per-passage relevance.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case)
class SCREAMING_SNAKE_CASE__:
    """Mixin adding DPR reader encoding and best-span decoding on top of a
    BERT-style tokenizer.

    Review fixes: the two decoding methods previously shared a single
    anonymized name (so the second definition silently overwrote the first and
    ``self._get_best_spans`` was undefined), both signatures declared duplicate
    placeholder parameters (a ``SyntaxError``), and the sort lambda referenced
    an undefined name. All are restored to the conventional DPR reader API.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode question/title/text triples as ``[CLS] q [SEP] title [SEP] text``.

        With neither titles nor texts, or with only one of them, fall back to
        the plain (pair) tokenizer behavior.
        """
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts provided: encode it as an ordinary pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is broadcast over all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        # Encode unpadded here; final padding happens in self.pad below.
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Return up to ``num_spans`` best answer spans across all passages,
        visiting passages from most to least relevant."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Span indices are relative to the passage; shift back to the
                # full sequence before scoring/decoding.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Return the ``top_spans`` highest-scoring non-overlapping
        (start, end) intervals within one passage, each at most
        ``max_answer_length`` tokens long."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        # Best-scoring candidates first.
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip candidates that overlap an already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case)
class SCREAMING_SNAKE_CASE__(__snake_case, __snake_case):
    """Fast DPR reader tokenizer: BERT tokenization plus best-span decoding.

    Review fix: the six class attributes were all assigned to one placeholder
    name, each overwriting the previous; restored to the attribute names the
    tokenizer base class reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
# Module metadata. Review fix: all six values were assigned to one placeholder
# name; restored to the conventional dunder metadata fields (order matches the
# original author/license/version/maintainer/email/status values).
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __init__(self , _lowercase ):
'''simple docstring'''
super().__init__()
__a : list[str] = []
__a : Optional[Any] = domain
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__a : Any = parse.urljoin(self.domain , _lowercase )
self.urls.append(_lowercase )
def get_domain_name(url: str) -> str:
    """Return the registered domain (last two labels) of ``url``.

    (Renamed from an anonymized placeholder: the crawler calls
    ``get_domain_name``.)
    """
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (e.g. ``sub.example.com``) of ``url``."""
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl ``url`` one level deep and return all e-mail addresses found on
    it and the pages it links to, sorted and de-duplicated.

    Raises:
        SystemExit: if the initial page cannot be fetched.
    """
    # The domain restricts which ``local@domain`` addresses are accepted below.
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    # Review fix: the result was bound to a placeholder name while the
    # f-string and join below read `emails`.
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 63 |
"""simple docstring"""
import os
def largest_product(grid):
    """Return the greatest product of four adjacent cells in ``grid``.

    Adjacency is vertical, horizontal, or either diagonal; as the original
    comment notes, the combined scan assumes a square (n x n) grid.
    (Renamed from an anonymized placeholder: ``solution`` calls
    ``largest_product``; the two products and the ``max`` call below also
    previously overwrote a single placeholder variable.)
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
def solution():
    """Read the grid from ``grid.txt`` (next to this file) and return the
    largest product of four adjacent numbers in any direction (Project Euler
    problem 11).

    Review fixes: renamed from an anonymized placeholder (the main guard calls
    ``solution``), and ``os.path.dirname`` previously received an undefined
    name — restored to ``__file__``.
    """
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    # Convert the string cells to integers, row by row.
    grid = [[int(cell) for cell in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 63 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum over contiguous subarrays.

    Args:
        arr: the input sequence; an empty sequence yields 0.
        allow_empty_subarrays: when True the empty subarray (sum 0) is also a
            candidate, so the result is never negative.

    (Renamed from an anonymized placeholder: the demo below calls
    ``max_subarray_sum``; the final ``max`` call also previously read
    undefined names.)
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at ``num``
        # (or at the empty subarray when allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Review fix: the list was bound to a placeholder name while the f-string
    # below reads `nums`.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline.

    NOTE(review): both fields were anonymized to bare ``42`` placeholders
    (which are not dataclass fields at all); they are restored to the
    conventional pipeline-output fields — confirm against the pipeline
    implementation. The base class is the imported ``BaseOutput``.
    """

    # Generated images, as PIL images or a numpy array.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags, or None when the safety checker is disabled.
    nsfw_content_detected: Optional[List[bool]]
# Import the real pipeline implementation only when both optional backends
# (transformers and torch) are installed; otherwise only the output dataclass
# above is importable from this module.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration test for the TF XLM-RoBERTa base model.

    Review fixes: the method is renamed to ``test_*`` so unittest actually
    collects it, and the digit-mangled ``tf.intaa``/``tf.floataa`` dtypes
    (which do not exist) are restored to ``tf.int32``/``tf.float32``.
    """

    @slow
    def test_output_embeds_base_model(self):
        """Check the pretrained model reproduces reference hidden states."""
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Review fix: the set was bound to a placeholder name while the test-class
# body below reads `_TO_SKIP`.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for the text-classification pipeline.

    Review fixes: the mapping class attributes are restored to the names the
    class body below actually reads (``model_mapping``/``tf_model_mapping`` —
    previously unbound, so the class failed to even be created), the test
    methods are renamed to ``test_*`` so unittest collects them instead of
    silently overwriting one another, and the digit-mangled
    ``model.config.idalabel`` is corrected to the real ``id2label`` attribute.
    """

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        """Tiny PT model: exact scores for single/batched inputs and top_k."""
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        """The pipeline accepts an explicit torch.device."""
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        """Tiny TF model: same exact score as the PT counterpart."""
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        """Default PT sentiment pipeline on real inputs."""
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        """Default TF sentiment pipeline on real inputs."""
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        """Hook for the common pipeline test mixin."""
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        """Generic behavior checks shared across all model architectures."""
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 63 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=_lowercase , )
assert hasattr(self , """env""" )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Optional[Any] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
__a : Union[str, Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
__a : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
__a : Tuple = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_lowercase , instance_type=self.instance_type , debugger_hook_config=_lowercase , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=_lowercase , py_version="""py36""" , )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
TrainingJobAnalytics(_lowercase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Any = self.create_estimator(_lowercase )
# run training
estimator.fit()
# result dataframe
__a : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
__a : List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _lowercase )
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = 0
__a : Optional[Any] = [0]
__a : int = [0]
__a : str = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
__a : int = [60]
__a : Union[str, Any] = [10]
__a : Tuple = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = 3
__a : str = [1, 2, 3]
__a : Optional[Any] = [3, 2, 1]
__a : int = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = 50
__a : Tuple = [60, 100, 120]
__a : List[str] = [10, 20, 30]
__a : Union[str, Any] = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 220 )
if __name__ == "__main__":
unittest.main()
| 63 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __init__(self , *_lowercase , **_lowercase ):
'''simple docstring'''
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 63 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = Rectangle(height=0.5 , width=0.5 )
__a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
__a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a : Dict = [mem.copy() for i in range(6 )]
__a : str = [mem.copy() for i in range(6 )]
__a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__a : Union[str, Any] = Text("""CPU""" , font_size=24 )
__a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
__a : Optional[Any] = [mem.copy() for i in range(4 )]
__a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[str] = Text("""GPU""" , font_size=24 )
__a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.move_to([-1, -1, 0] )
self.add(_lowercase )
__a : List[Any] = [mem.copy() for i in range(6 )]
__a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Optional[Any] = Text("""Model""" , font_size=24 )
__a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.add(_lowercase )
__a : Tuple = []
__a : Tuple = []
__a : Optional[int] = []
for i, rect in enumerate(_lowercase ):
rect.set_stroke(_lowercase )
__a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
self.add(_lowercase )
model_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase , *_lowercase )
__a : Optional[Any] = [mem.copy() for i in range(6 )]
__a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
__a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(_lowercase )
__a : Dict = []
__a : int = []
for i, rect in enumerate(_lowercase ):
__a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
target.move_to(_lowercase )
ckpt_arr.append(_lowercase )
__a : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase )
__a : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a : List[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowercase , _lowercase )
__a : str = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowercase )
__a : Optional[int] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__a : List[Any] = [meta_mem.copy() for i in range(6 )]
__a : Optional[int] = [meta_mem.copy() for i in range(6 )]
__a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__a : Dict = Text("""Disk""" , font_size=24 )
__a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
__a : Optional[Any] = []
for i, rect in enumerate(_lowercase ):
__a : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(FadeOut(_lowercase ) )
__a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) )
self.play(
FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
self.wait()
| 63 | 1 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = SMALL_MODEL_IDENTIFIER
__a : List[Any] = """pt"""
__a : Union[str, Any] = """tf"""
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Tuple = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowercase )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[str] = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowercase )
model_tf.save_pretrained(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = """mock_framework"""
# Framework provided - return whatever the user provides
__a : str = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
__a : int = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
__a : List[str] = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
__a : List[Any] = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
__a : str = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
__a : str = FeaturesManager.determine_framework(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = MagicMock(return_value=_lowercase )
with patch("""transformers.onnx.features.is_tf_available""" , _lowercase ):
__a : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__a : Tuple = MagicMock(return_value=_lowercase )
with patch("""transformers.onnx.features.is_torch_available""" , _lowercase ):
__a : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
__a : List[Any] = MagicMock(return_value=_lowercase )
__a : List[Any] = MagicMock(return_value=_lowercase )
with patch("""transformers.onnx.features.is_tf_available""" , _lowercase ), patch(
"""transformers.onnx.features.is_torch_available""" , _lowercase ):
__a : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
__a : int = MagicMock(return_value=_lowercase )
__a : List[str] = MagicMock(return_value=_lowercase )
with patch("""transformers.onnx.features.is_tf_available""" , _lowercase ), patch(
"""transformers.onnx.features.is_torch_available""" , _lowercase ):
with self.assertRaises(_lowercase ):
__a : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float(moles / volume ) * nfactor )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__(self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ):
'''simple docstring'''
__a : Dict = parent
__a : Optional[int] = batch_size
__a : Any = seq_length
__a : Union[str, Any] = is_training
__a : Dict = use_attention_mask
__a : Union[str, Any] = use_token_type_ids
__a : str = use_labels
__a : Tuple = vocab_size
__a : Optional[Any] = hidden_size
__a : int = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Optional[int] = hidden_dropout_prob
__a : int = attention_probs_dropout_prob
__a : str = max_position_embeddings
__a : List[Any] = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Any = initializer_range
__a : Optional[Any] = num_choices
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Optional[int] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Tuple = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : List[Any] = config_and_inputs
__a : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a , __a : Tuple = config_and_inputs
__a : Any = True
__a : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
_lowerCAmelCase = True
_lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__a : Optional[int] = model_class_name.from_pretrained("""roberta-base""" , from_pt=_lowercase )
__a : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = CLIPConfig
_lowerCAmelCase = ["CLIPEncoderLayer"]
def __init__(self , _lowercase ):
'''simple docstring'''
super().__init__(_lowercase )
__a : int = CLIPVisionModelWithProjection(config.vision_config )
__a : Optional[Any] = nn.Linear(config.vision_config.projection_dim , 1 )
__a : int = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=0.5 , _lowercase=0.5 ):
'''simple docstring'''
__a : Dict = self.vision_model(_lowercase )[0]
__a : List[Any] = self.p_head(_lowercase )
__a : Optional[int] = nsfw_detected.flatten()
__a : Optional[int] = nsfw_detected > p_threshold
__a : Tuple = nsfw_detected.tolist()
if any(_lowercase ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(_lowercase ):
if nsfw_detected_:
__a : int = np.zeros(images[idx].shape )
__a : Optional[Any] = self.w_head(_lowercase )
__a : List[str] = watermark_detected.flatten()
__a : Union[str, Any] = watermark_detected > w_threshold
__a : Any = watermark_detected.tolist()
if any(_lowercase ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(_lowercase ):
if watermark_detected_:
__a : List[Any] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 63 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : float ):
# For applying gaussian function for each element in matrix.
__a : int = math.sqrt(_lowerCamelCase )
__a : Any = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
__a : Any = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : float ):
# Creates a gaussian kernel of given dimension.
__a : int = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _lowerCamelCase ):
for j in range(0 , _lowerCamelCase ):
__a : Any = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : int , ):
__a : Tuple = np.zeros(img.shape )
__a : Optional[int] = get_gauss_kernel(_lowerCamelCase , _lowerCamelCase )
__a , __a : int = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__a : List[str] = get_slice(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__a : Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
__a : Optional[Any] = vec_gaussian(_lowerCamelCase , _lowerCamelCase )
__a : Optional[Any] = np.multiply(_lowerCamelCase , _lowerCamelCase )
__a : Any = np.multiply(_lowerCamelCase , _lowerCamelCase )
__a : Tuple = np.sum(_lowerCamelCase ) / np.sum(_lowerCamelCase )
__a : Optional[Any] = val
return imga
def __magic_name__ ( _lowerCamelCase : list ):
__a : Optional[Any] = args[1] if args[1:] else """../image_data/lena.jpg"""
__a : Union[str, Any] = float(args[2] ) if args[2:] else 1.0
__a : Optional[int] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__a : Any = int(args[4] )
__a : Any = kernel_size + abs(kernel_size % 2 - 1 )
else:
__a : Optional[int] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowercase__ , lowercase__ , lowercase__ , lowercase__ = parse_args(sys.argv)
lowercase__ = cva.imread(filename, 0)
cva.imshow("input image", img)
lowercase__ = img / 255
lowercase__ = out.astype("float32")
lowercase__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowercase__ = out * 255
lowercase__ = np.uinta(out)
cva.imshow("output image", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 63 | 1 |
"""simple docstring"""
from itertools import product
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
__a : Dict = sides_number
__a : Optional[Any] = max_face_number * dice_number
__a : str = [0] * (max_total + 1)
__a : Optional[int] = 1
__a : List[Any] = range(_lowerCamelCase , max_face_number + 1 )
for dice_numbers in product(_lowerCamelCase , repeat=_lowerCamelCase ):
__a : List[Any] = sum(_lowerCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def __magic_name__ ( ):
__a : Optional[Any] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Optional[int] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : Optional[Any] = 0
__a : Any = 9
__a : Union[str, Any] = 4 * 9
__a : Optional[Any] = 6
for peter_total in range(_lowerCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : Union[str, Any] = (4**9) * (6**6)
__a : List[str] = peter_wins_count / total_games_number
__a : Dict = round(_lowerCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
| 63 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __magic_name__ ( ):
__a : Dict = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 2_0, """a """ * 3_0, """b """ * 7],
}
__a : Optional[Any] = Dataset.from_dict(_lowerCamelCase )
return dataset
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = get_dataset()
__a : List[Any] = make_duplicate_clusters(_lowercase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = get_dataset()
__a , __a : Optional[Any] = deduplicate_dataset(_lowercase )
self.assertEqual(len(_lowercase ) , 2 )
print(_lowercase )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , _lowercase )
| 63 | 1 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase = "cpu" , _lowercase = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
__a : Any = device
__a : Tuple = CLIPTokenizerFast.from_pretrained(_lowercase )
__a : List[str] = [0.4814_5466, 0.457_8275, 0.4082_1073]
__a : Optional[Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
__a : Optional[int] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__a : Tuple = torchvision.transforms.Resize(224 )
__a : str = torchvision.transforms.CenterCrop(224 )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : int = self.resize(_lowercase )
__a : List[str] = self.center_crop(_lowercase )
__a : Union[str, Any] = self.normalize(_lowercase )
return images
def __call__(self , _lowercase=None , _lowercase=None , **_lowercase ):
'''simple docstring'''
__a : Any = self.tokenizer(text=_lowercase , **_lowercase )
__a : Union[str, Any] = self.preprocess_img(_lowercase )
__a : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self , _lowercase=10 , _lowercase=0.01 , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=True , _lowercase="image" , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , ):
'''simple docstring'''
super().__init__()
__a : Any = None
__a : Tuple = device if device else get_device()
if vqgan:
__a : Optional[Any] = vqgan
else:
__a : List[Any] = load_vqgan(self.device , conf_path=_lowercase , ckpt_path=_lowercase )
self.vqgan.eval()
if clip:
__a : str = clip
else:
__a : Dict = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
__a : Any = ProcessorGradientFlow(device=self.device )
__a : Any = iterations
__a : Optional[Any] = lr
__a : Optional[Any] = log
__a : Dict = make_grid
__a : Dict = return_val
__a : Tuple = quantize
__a : Optional[Any] = self.vqgan.decoder.z_shape
def lowerCAmelCase__(self , _lowercase=None , _lowercase=None , _lowercase=5 , _lowercase=True ):
'''simple docstring'''
__a : str = []
if output_path is None:
__a : Optional[int] = """./animation.gif"""
if input_path is None:
__a : List[str] = self.save_path
__a : Tuple = sorted(glob(input_path + """/*""" ) )
if not len(_lowercase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(_lowercase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
__a : Union[str, Any] = total_duration / len(_lowercase )
__a : Optional[Any] = [frame_duration] * len(_lowercase )
if extend_frames:
__a : Optional[int] = 1.5
__a : str = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(_lowercase ) )
imageio.mimsave(_lowercase , _lowercase , duration=_lowercase )
print(F'''gif saved to {output_path}''' )
def lowerCAmelCase__(self , _lowercase=None , _lowercase=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
__a : Any = preprocess(Image.open(_lowercase ) , target_image_size=256 ).to(self.device )
__a : str = preprocess_vqgan(_lowercase )
__a , *__a : List[str] = self.vqgan.encode(_lowercase )
return z
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[str] = self.latent.detach().requires_grad_()
__a : Optional[Any] = base_latent + transform_vector
if self.quantize:
__a , *__a : str = self.vqgan.quantize(_lowercase )
else:
__a : str = trans_latent
return self.vqgan.decode(_lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=None ):
'''simple docstring'''
__a : Tuple = self.clip_preprocessor(text=_lowercase , images=_lowercase , return_tensors="""pt""" , padding=_lowercase )
__a : Any = self.clip(**_lowercase )
__a : List[str] = clip_outputs.logits_per_image
if weights is not None:
__a : Optional[int] = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Any = self._get_clip_similarity(pos_prompts["""prompts"""] , _lowercase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
__a : Tuple = self._get_clip_similarity(neg_prompts["""prompts"""] , _lowercase , weights=neg_prompts["""weights"""] )
else:
__a : Any = torch.tensor([1] , device=self.device )
__a : Dict = -torch.log(_lowercase ) + torch.log(_lowercase )
return loss
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[int] = torch.randn_like(self.latent , requires_grad=_lowercase , device=self.device )
__a : List[Any] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__a : int = self._add_vector(_lowercase )
__a : Any = loop_post_process(_lowercase )
__a : Dict = self._get_CLIP_loss(_lowercase , _lowercase , _lowercase )
print("""CLIP loss""" , _lowercase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=_lowercase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
wandb.init(reinit=_lowercase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
__a : Optional[int] = Image.open(_lowercase )
__a : Union[str, Any] = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(_lowercase ) )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if not prompts:
return []
__a : Optional[Any] = []
__a : Optional[Any] = []
if isinstance(_lowercase , _lowercase ):
__a : Dict = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(_lowercase , (tuple, list) ):
__a : str = prompt[0]
__a : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
__a , __a : Optional[int] = prompt.split(""":""" )
__a : Any = float(_lowercase )
else:
__a : Dict = prompt
__a : List[str] = 1.0
processed_prompts.append(_lowercase )
weights.append(_lowercase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_lowercase , device=self.device ),
}
def lowerCAmelCase__(self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=True , _lowercase=None , ):
'''simple docstring'''
if image_path:
__a : int = self._get_latent(_lowercase )
else:
__a : Optional[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(_lowercase , _lowercase , _lowercase )
assert pos_prompts, "You must provide at least one positive prompt."
__a : Union[str, Any] = self.process_prompts(_lowercase )
__a : Optional[Any] = self.process_prompts(_lowercase )
if save_final and save_path is None:
__a : Dict = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(_lowercase ):
os.makedirs(_lowercase )
else:
__a : Union[str, Any] = save_path + """_""" + get_timestamp()
os.makedirs(_lowercase )
__a : Tuple = save_path
__a : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(_lowercase ) )
__a : Optional[int] = loop_post_process(_lowercase )
for iter, transformed_img in enumerate(self._optimize_CLIP(_lowercase , _lowercase , _lowercase ) ):
if show_intermediate:
show_pil(_lowercase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(_lowercase )} )
if show_final:
show_pil(_lowercase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowercase__ = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name (or prefix) -> HF WavLM parameter path.
# A "*" in the HF path is replaced with the transformer layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# HF parameter paths living at the top level of the model (no submodule prefix).
# NOTE(review): name taken from the upstream conversion script; this list is not
# referenced in the visible code — confirm against the full file.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by dotted ``key``.

    ``weight_type`` selects which sub-tensor ("weight", "weight_g", "weight_v",
    "bias") receives the data; ``None`` writes to the resolved module/tensor
    itself. ``full_name`` is only used for assert/log messages.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor from the fairseq WavLM state dict into the HF model.

    Conv feature-extractor tensors are routed through ``load_conv_layer``; all
    other tensors are matched against ``MAPPING`` and written with
    ``set_recursively``. Tensors that match nothing are reported at the end.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits right before the matched key in the
                        # fairseq name, e.g. "encoder.layers.3.fc1".
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like "...conv_layers.<layer_id>.<type_id>...": type 0
    addresses the conv weight/bias; type 2 a layer norm (legal only when layer
    norms are used everywhere, or for layer 0 under group norm). Anything else
    is recorded in ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Message fixed to index conv_layers; the old form indexed the
            # extractor module directly and would itself raise when triggered.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into a HF ``WavLMModel`` and save it.

    Args:
        checkpoint_path: Path to the fairseq checkpoint (.pt).
        pytorch_dump_folder_path: Output directory for the HF model.
        config_path: Optional path to an existing HF config.json.
    """
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowercase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 63 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the supported learning-rate schedules.

    The values are the user-facing strings accepted by the scheduler factory
    (e.g. ``SchedulerType("linear") is SchedulerType.LINEAR``).
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: The optimizer whose LR is scheduled.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        A ``LambdaLR`` whose multiplier is 1 at every step.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant LR preceded by a linear warmup from 0 to the optimizer's base LR.

    Args:
        optimizer: The optimizer whose LR is scheduled.
        num_warmup_steps: Number of warmup steps.
        last_epoch: Index of the last epoch when resuming training.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR schedule driven by a rule string.

    ``step_rules`` looks like ``"10:0.5,20:0.1,0.01"``: multiplier 0.5 until
    step 10, then 0.1 until step 20, then 0.01 for the rest of training.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    # The trailing entry (no colon) is the multiplier after the last boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Closure factory so the rule table is captured by value.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup from 0 to the base LR, then linear decay back to 0.

    Args:
        optimizer: The optimizer whose LR is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        last_epoch: Index of the last epoch when resuming training.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay.

    With the default ``num_cycles=0.5`` the LR follows half a cosine wave from
    the base LR down to 0 over the post-warmup steps.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with ``num_cycles`` hard restarts.

    After warmup the LR repeatedly falls from the base LR to 0 along a cosine
    curve, restarting at the base LR at the start of each cycle.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the base LR down to ``lr_end``.

    Raises:
        ValueError: If ``lr_end`` is not strictly smaller than the optimizer's
            initial LR (the decay would otherwise go upward).
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each SchedulerType member to its factory function; consumed by the
# string/enum-driven scheduler getter below.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any supported LR schedule by name.

    Args:
        name: A ``SchedulerType`` member or its string value.
        optimizer: The optimizer whose LR is scheduled.
        step_rules: Rule string, only used by the piecewise-constant schedule.
        num_warmup_steps: Required by every warmup-based schedule.
        num_training_steps: Required by the decaying schedules.
        num_cycles: Cycle count for the hard-restarts cosine schedule.
        power: Exponent for the polynomial-decay schedule.
        last_epoch: Index of the last epoch when resuming training.

    Raises:
        ValueError: If a schedule needs ``num_warmup_steps`` or
            ``num_training_steps`` and it was not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 63 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable pinhole cameras with per-batch orthonormal frames."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3] image-plane x axis
    y: torch.Tensor  # [batch_size x 3] image-plane y axis
    z: torch.Tensor  # [batch_size x 3] viewing direction
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # Basic consistency checks: all frame tensors are [batch_size, 3].
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """Image resolution [width, height] as a float32 tensor."""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """Field of view [x_fov, y_fov] as a float32 tensor."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Per-pixel integer (x, y) coordinates in row-major order, shape [H*W, 2]."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """Rays for every pixel of every camera, shape [batch, H*W*inner, 2, 3]."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """Map pixel coordinates to (origin, direction) ray pairs, shape [..., 2, 3]."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Normalize pixel coords to [-1, 1], then scale by tan(fov/2) to get
        # image-plane fractions.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a camera for a resized view; the aspect ratio must be unchanged."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        # NOTE(review): `shape` is not forwarded here (faithful to the visible
        # code); since the field has no default this call raises — confirm
        # intended usage upstream.
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )
def __magic_name__(_lowerCamelCase: int):
    """Build a ring of 20 cameras panning 360 degrees around the origin.

    Each camera sits at radius 4, tilted to look slightly downward toward the
    center; ``_lowerCamelCase`` is the square image size (width and height).
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=2_0):
        # Viewing direction: inward with a constant downward tilt, normalized.
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=_lowerCamelCase,
        height=_lowerCamelCase,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 63 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config from ``config_path``; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Instantiate a taming-transformers ``VQModel`` and load weights onto ``device``.

    Falls back to the default checkpoint/config paths when none are given.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    # display=False: only print the config when explicitly requested elsewhere.
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    # NOTE(review): strict=True assumed (exact key match) — confirm upstream.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """Encode ``x`` with the VQGAN, report the latent resolution, and decode it back."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Attr"`` to the attribute it names.

    When ``reload`` is True, the containing module is re-imported first.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    # NOTE(review): package=None assumed (absolute import) — confirm upstream.
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Instantiate the class named by ``config["target"]`` with ``config["params"]``.

    Raises:
        KeyError: If the config has no ``"target"`` key.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build a model from an instantiate-config; optionally load weights, move to GPU, eval.

    Returns:
        ``{"model": model}`` for compatibility with callers expecting a dict.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load a (Lightning) checkpoint if given and build the model.

    Returns:
        ``(model, global_step)`` where ``global_step`` is ``None`` when no
        checkpoint path was provided.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 63 | 1 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class SCREAMING_SNAKE_CASE__:
    """Applies an invisible DWT-DCT watermark to generated image batches (SD-XL style)."""

    def __init__(self):
        # Fixed bit payload embedded in every image.
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images):
        """Watermark a batch of images in [-1, 1] (NCHW); returned in the same range.

        Images narrower than 256 px cannot carry the watermark and are
        returned unchanged.
        """
        if images.shape[-1] < 256:
            return images
        # [-1, 1] float NCHW -> [0, 255] float NHWC numpy for the cv2-based encoder.
        images_np = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images_np = [self.encoder.encode(image, "dwtDct") for image in images_np]
        images = torch.from_numpy(np.array(images_np)).permute(0, 3, 1, 2)
        # Back to [-1, 1], clamped against encoder round-off.
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BARThez (moussaKam/mbarthez)."""

    # Mixin configuration: which tokenizer classes to exercise.
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    # NOTE(review): attribute name assumed from the TokenizerTesterMixin
    # convention — confirm against the mixin.
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        # NOTE(review): legacy_format=False assumed (save both formats).
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """`<pad>` maps to id 1 and back."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        # NOTE(review): padding=True / truncation=True assumed; the (2, 6)
        # shape assertions below hold for both sequences.
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 63 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both bindings below share the obfuscated name `lowercase__`,
# so the dict assignment clobbers the logger — upstream presumably names these
# `logger` and `UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm.
lowercase__ = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config.json URL.
lowercase__ = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration for a UniSpeech-style speech encoder.

    Stores the hyper-parameters of the convolutional feature encoder, the
    Transformer encoder, SpecAugment masking, the pretraining quantizer and
    the CTC head.  Remaining keyword arguments are forwarded to the base
    configuration class.
    """

    # model_type identifier used by the auto-config machinery.
    _lowerCAmelCase = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        # BUG FIX: every parameter had been mangled to the same name
        # ``_lowercase`` (duplicate argument names are a SyntaxError) and each
        # attribute assignment was bound to a throwaway local ``__a``.  The
        # names are restored from the right-hand sides the rewrite left
        # intact; the parameter order is recovered from the surviving default
        # values (they match the public UniSpeechConfig signature).
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self ):
        """Total downsampling factor of the convolutional feature encoder
        (the product of all convolutional strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 63 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for the AutoImageProcessor factory (loading, registration and
    trust_remote_code handling).

    NOTE(review): this module appears machine-rewritten — every method bears
    the same name ``lowerCAmelCase__`` (each definition shadows the previous
    one, so only the last survives on the class) and ``_lowercase`` stands in
    for several distinct original names (e.g. the temp-dir path).  The code
    cannot run as-is; the comments below describe the apparent intent —
    confirm against the original transformers test module.
    """
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Originally a setUp-style counter initialisation.
        __a : List[Any] = 0
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Load an image processor directly from a hub checkpoint id.
        __a : List[str] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.assertIsInstance(_lowercase , _lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Load from a local directory via the image_processor_type key.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : int = Path(_lowercase ) / """preprocessor_config.json"""
            __a : Dict = Path(_lowercase ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_lowercase , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(_lowercase , """w""" ) )
            __a : Optional[int] = AutoImageProcessor.from_pretrained(_lowercase )
            self.assertIsInstance(_lowercase , _lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Fall back to the legacy feature_extractor_type key.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : Optional[int] = Path(_lowercase ) / """preprocessor_config.json"""
            __a : int = Path(_lowercase ) / """config.json"""
            json.dump(
                {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_lowercase , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(_lowercase , """w""" ) )
            __a : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowercase )
            self.assertIsInstance(_lowercase , _lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # config.json alone should be enough to resolve the processor class.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : str = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            __a : Dict = Path(_lowercase ) / """preprocessor_config.json"""
            __a : Optional[Any] = Path(_lowercase ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_lowercase , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(_lowercase , """w""" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            __a : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowercase ).to_dict()
            config_dict.pop("""image_processor_type""" )
            __a : Union[str, Any] = CLIPImageProcessor(**_lowercase )
            # save in new folder
            model_config.save_pretrained(_lowercase )
            config.save_pretrained(_lowercase )
            __a : int = AutoImageProcessor.from_pretrained(_lowercase )
            # make sure private variable is not incorrectly saved
            __a : Any = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(_lowercase , _lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Load from a bare preprocessor_config.json file.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : Dict = Path(_lowercase ) / """preprocessor_config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_lowercase , """w""" ) , )
            __a : Optional[int] = AutoImageProcessor.from_pretrained(_lowercase )
            self.assertIsInstance(_lowercase , _lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Unknown model identifier should raise a helpful error.
        with self.assertRaisesRegex(
            _lowercase , """clip-base is not a local folder and is not a valid model identifier""" ):
            __a : Optional[Any] = AutoImageProcessor.from_pretrained("""clip-base""" )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Invalid git revision should raise a helpful error.
        with self.assertRaisesRegex(
            _lowercase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            __a : Dict = AutoImageProcessor.from_pretrained(_lowercase , revision="""aaaaaa""" )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Repo without a preprocessor config should raise a helpful error.
        with self.assertRaisesRegex(
            _lowercase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            __a : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # trust_remote_code gating for hub-defined image processors.
        with self.assertRaises(_lowercase ):
            __a : List[Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_lowercase ):
            __a : List[Any] = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowercase )
        __a : Optional[Any] = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowercase )
        self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(_lowercase )
            __a : Optional[Any] = AutoImageProcessor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Registering a custom config/processor pair with the auto classes.
        try:
            AutoConfig.register("""custom""" , _lowercase )
            AutoImageProcessor.register(_lowercase , _lowercase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_lowercase ):
                AutoImageProcessor.register(_lowercase , _lowercase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                __a : Dict = Path(_lowercase ) / """preprocessor_config.json"""
                __a : List[str] = Path(_lowercase ) / """config.json"""
                json.dump(
                    {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_lowercase , """w""" ) , )
                json.dump({"""model_type""": """clip"""} , open(_lowercase , """w""" ) )
                __a : Optional[Any] = CustomImageProcessor.from_pretrained(_lowercase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(_lowercase )
                __a : Optional[Any] = AutoImageProcessor.from_pretrained(_lowercase )
            self.assertIsInstance(_lowercase , _lowercase )
        finally:
            # Always clean the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Local vs remote resolution when both a local and a hub processor exist.
        class SCREAMING_SNAKE_CASE__ ( __snake_case ):
            _lowerCAmelCase = True
        try:
            AutoConfig.register("""custom""" , _lowercase )
            AutoImageProcessor.register(_lowercase , _lowercase )
            # If remote code is not set, the default is to use local
            __a : List[Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            __a : Union[str, Any] = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowercase )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            __a : Tuple = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowercase )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(not hasattr(_lowercase , """is_local""" ) )
        finally:
            # Always clean the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 63 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Helper that builds EfficientFormer configs and dummy inputs for the
    TF model tests below.

    NOTE(review): this module appears machine-rewritten — the ``_lowercase``
    parameter names in ``__init__`` are duplicated (a SyntaxError) and the
    attribute assignments were replaced by a throwaway ``__a`` local, so the
    right-hand-side names below are the only record of the intended
    attributes.  The code cannot run as-is.
    """
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        '''simple docstring'''
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        # seq_length is one more than embed_dim (class token), presumably.
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Build a (config, pixel_values, labels) triple of dummy inputs.
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Assemble an EfficientFormerConfig from the tester's attributes.
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''simple docstring'''
        # Base-model forward pass; checks the output hidden-state shape.
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''simple docstring'''
        # Classification head forward pass; checks the logits shape.
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # NOTE(review): the triple unpack below binds all three values to the
        # same throwaway name and ``pixel_values`` is undefined — mangled from
        # ``config, pixel_values, labels = config_and_inputs``.
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = TFEfficientFormerModelTester(self )
__a : Any = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.asseretIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
__a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__a : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__a : List[Any] = True
__a : Tuple = False
__a : List[Any] = True
__a : int = model_class(_lowercase )
__a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[Any] = True
__a : List[str] = model_class(_lowercase )
__a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__a : Dict = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__a : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__a : Optional[Any] = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
    """Load the COCO fixture image used by the integration tests below."""
    # BUG FIX: the opened image was bound to a throwaway local while the
    # undefined name ``image`` was returned (NameError at runtime); return
    # the opened image directly instead.
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests that compare TF EfficientFormer logits against
    reference values.

    NOTE(review): both test methods carry the same mangled name
    ``lowerCAmelCase__`` — the second definition shadows the first, so only
    the teacher-distilled variant would actually be collected.  ``_lowercase``
    stands in for several distinct original names; the code cannot run as-is.
    """
    @cached_property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Image-processor fixture (None when vision dependencies are missing).
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Plain image-classification head on the l1-300 checkpoint.
        __a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __a : Optional[Any] = self.default_image_processor
        __a : List[str] = prepare_img()
        __a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : Optional[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : str = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Teacher-distilled image-classification head on the same checkpoint.
        __a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __a : Any = self.default_image_processor
        __a : str = prepare_img()
        __a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : List[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : int = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : list[float] ):
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
__a : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_lowerCamelCase ) )
return round(_lowerCamelCase , ndigits=2 )
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 63 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Wraps a base torch Distribution in an affine ``loc + scale * x``
    transform.

    NOTE(review): this file appears machine-rewritten — the duplicate
    ``_lowercase`` parameters are a SyntaxError and ``self.loc``/``self.scale``
    are read but never assigned (the assignments were mangled into a
    throwaway ``__a`` local), so the class cannot run as-is.
    """
    def __init__(self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=0 ):
        '''simple docstring'''
        # Default to the identity transform when loc/scale are omitted.
        __a : Any = 1.0 if scale is None else scale
        __a : str = 0.0 if loc is None else loc
        super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] )
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Mean of the transformed distribution: scale * base_mean + loc.
        return self.base_dist.mean * self.scale + self.loc
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Variance scales with scale**2; the shift does not affect it.
        return self.base_dist.variance * self.scale**2
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Standard deviation of the transformed distribution.
        return self.variance.sqrt()
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Projects hidden features into one tensor per distribution argument and
    maps each into its valid domain via ``domain_map``.

    NOTE(review): duplicate ``_lowercase`` parameters are a SyntaxError and
    ``args_dim``/``domain_map`` are referenced but undefined (mangled
    rewrite); the class cannot run as-is.
    """
    def __init__(self , _lowercase , _lowercase , _lowercase , **_lowercase ):
        '''simple docstring'''
        super().__init__(**_lowercase )
        __a : str = args_dim
        # One linear head per distribution argument (e.g. loc, scale).
        __a : List[Any] = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] )
        __a : Dict = domain_map
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        # Apply every head, then constrain each output to its domain.
        __a : List[Any] = [proj(_lowercase ) for proj in self.proj]
        return self.domain_map(*_lowercase )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Minimal nn.Module adapter that applies a plain callable in forward().

    NOTE(review): ``function`` is referenced but undefined (mangled rewrite)
    and the forward method's duplicate ``_lowercase`` parameters are a
    SyntaxError; the class cannot run as-is.
    """
    def __init__(self , _lowercase ):
        '''simple docstring'''
        super().__init__()
        __a : Optional[int] = function
    def lowerCAmelCase__(self , _lowercase , *_lowercase ):
        '''simple docstring'''
        # Delegate straight to the wrapped callable.
        return self.function(_lowercase , *_lowercase )
class SCREAMING_SNAKE_CASE__ :
    """Base class describing how network outputs parameterise a torch
    Distribution; one subclass exists per distribution family.

    NOTE(review): this file appears machine-rewritten — the three ``42``
    placeholders below replaced typed class attributes, duplicate
    ``_lowercase`` parameters are a SyntaxError, and several right-hand
    sides reference names that no longer exist; comments describe intent.
    """
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    def __init__(self , _lowercase = 1 ):
        '''simple docstring'''
        __a : Optional[int] = dim
        # Each distribution argument needs ``dim`` copies of its parameters.
        __a : str = {k: dim * self.args_dim[k] for k in self.args_dim}
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        # Wrap in Independent for multivariate (dim > 1) outputs.
        if self.dim == 1:
            return self.distribution_class(*_lowercase )
        else:
            return Independent(self.distribution_class(*_lowercase ) , 1 )
    def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None , ):
        '''simple docstring'''
        # Optionally apply an affine (loc/scale) transform on top.
        __a : Tuple = self._base_distribution(_lowercase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Event shape: scalar for dim == 1, else a vector of length dim.
        return () if self.dim == 1 else (self.dim,)
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return len(self.event_shape )
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Numerical support lower bound, used by some subclasses.
        return 0.0
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        # Build the projection head that emits this family's parameters.
        return ParameterProjection(
            in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def lowerCAmelCase__(self , *_lowercase ):
        '''simple docstring'''
        raise NotImplementedError()
    @staticmethod
    def lowerCAmelCase__(_lowercase ):
        '''simple docstring'''
        # squareplus: a smooth softplus-like map from R onto R+.
        return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Student's t output head: the network emits df, loc and scale.

    NOTE(review): duplicate ``_lowercase`` parameters are a SyntaxError and
    the return line reads ``df``/``loc``/``scale`` that were mangled into a
    throwaway ``__a`` local; the class cannot run as-is.
    """
    _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
    _lowerCAmelCase = StudentT
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase , _lowercase ):
        '''simple docstring'''
        # Map raw network outputs into valid ranges: scale > 0, df > 2.
        __a : int = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        __a : Optional[Any] = 2.0 + cls.squareplus(_lowercase )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Normal output head: the network emits loc and scale.

    NOTE(review): same mangling as the sibling classes (duplicate
    ``_lowercase`` parameters, throwaway ``__a`` assignments); cannot run
    as-is.
    """
    _lowerCAmelCase = {"loc": 1, "scale": 1}
    _lowerCAmelCase = Normal
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase ):
        '''simple docstring'''
        # Constrain scale to be strictly positive.
        __a : str = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Negative-binomial output head: the network emits total_count and
    logits.

    NOTE(review): same mangling as the sibling classes (duplicate
    ``_lowercase`` parameters, ``distr_args`` unpacked into throwaway
    locals); cannot run as-is.
    """
    _lowerCAmelCase = {"total_count": 1, "logits": 1}
    _lowerCAmelCase = NegativeBinomial
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase ):
        '''simple docstring'''
        # total_count must be positive; logits are unconstrained.
        __a : Union[str, Any] = cls.squareplus(_lowercase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        # Build the base distribution from (total_count, logits).
        __a , __a : Optional[Any] = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=_lowercase , logits=_lowercase )
        else:
            return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
    def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None ):
        '''simple docstring'''
        # Scaling is folded into the logits rather than an affine transform.
        __a , __a : List[Any] = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations

# A path through the grid, as (y, x) coordinate pairs.
Path = list[tuple[int, int]]

# BUG FIX: the three module-level names below had all been mangled to the
# same identifier ``lowercase__`` (each assignment clobbering the previous
# one) while the classes and the __main__ block read ``grid`` and ``delta``.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
    """A search-tree node for greedy best-first search on a grid.

    ``pos`` is stored as ``(pos_y, pos_x)``.  ``f_cost`` is the Manhattan
    heuristic only — greedy best-first ignores the accumulated ``g_cost``
    when ordering nodes.
    """

    def __init__(self , pos_x , pos_y , goal_x , goal_y , g_cost , parent ):
        # BUG FIX: all six parameters had been mangled to the same name
        # ``_lowercase`` (a SyntaxError) and the values were bound to a
        # throwaway local instead of instance attributes.
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self ):
        """Manhattan distance from this node to the goal."""
        # BUG FIX: this method had been renamed while __init__ still calls
        # ``self.calculate_heuristic`` — restore the name the call site uses.
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__(self , other ):
        # Orders nodes purely by heuristic cost (used by list.sort()).
        return self.f_cost < other.f_cost


# Restore the name the rest of the module uses to refer to this class.
Node = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ :
    """Greedy best-first search over the module-level ``grid``.

    Always expands the open node with the smallest heuristic ``f_cost``;
    ``g_cost`` is tracked but never used for ordering, which is what makes
    this greedy rather than A*.
    """

    def __init__(self , start , goal ):
        # BUG FIX: both parameters had been mangled to the same name
        # ``_lowercase`` (a SyntaxError) and every attribute assignment was
        # bound to a throwaway local ``__a``.  Attribute names are restored
        # from the uses in the methods below.
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self ):
        """Run the search.

        Returns the start-to-target path on success, ``[self.start.pos]``
        when the open list empties without reaching the target, and ``None``
        otherwise (unreachable in practice).
        """
        while self.open_nodes:
            # Open nodes are kept ordered by heuristic via Node.__lt__.
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # Keep whichever copy of this node has the lower g-cost.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self , parent ):
        """Return in-bounds, unblocked neighbour nodes of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path(self , node ):
        """Walk parent links back from *node* and return the path from start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path


# Restore the name the __main__ block uses to refer to this class.
GreedyBestFirst = SCREAMING_SNAKE_CASE__
if __name__ == "__main__":
lowercase__ = (0, 0)
lowercase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase__ = GreedyBestFirst(init, goal)
lowercase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase__ = 2
for elem in grid:
print(elem)
| 63 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic kernels/seeding so the pipeline tests are reproducible.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast tests for ``KandinskyVaaPriorPipeline`` built from tiny dummy models.

    NOTE(review): this class appears machine-renamed — every property/helper
    is called ``lowerCAmelCase__`` (so later defs shadow earlier ones), the
    class attributes all reuse ``_lowerCAmelCase``, and the mixin base
    ``__snake_case`` is never defined, while the bodies still reference the
    original names (``text_embedder_hidden_size``, ``time_input_dim``,
    ``dummy_prior``, ``get_dummy_components`` …) and locals renamed to
    ``__a`` that later lines read under their old names.  The original
    identifiers must be restored before this can run; the docstrings below
    describe the evident intent of each member.
    """

    _lowerCAmelCase = KandinskyVaaPriorPipeline  # pipeline under test
    _lowerCAmelCase = ["prompt"]  # required call params
    _lowerCAmelCase = ["prompt", "negative_prompt"]  # batchable params
    _lowerCAmelCase = [  # optional call kwargs exercised by the mixin
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase = False  # mixin feature flag — confirm which one

    @property
    def lowerCAmelCase__(self ):
        '''Dimension constant (32) shared by the dummy models.'''
        return 32

    @property
    def lowerCAmelCase__(self ):
        '''Dimension constant (32) shared by the dummy models.'''
        return 32

    @property
    def lowerCAmelCase__(self ):
        '''Delegates to ``time_input_dim`` — see the class NOTE about renames.'''
        return self.time_input_dim

    @property
    def lowerCAmelCase__(self ):
        '''Four times ``time_input_dim`` (a time-embedding width).'''
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase__(self ):
        '''Constant 100 — presumably a max sequence length; confirm.'''
        return 100

    @property
    def lowerCAmelCase__(self ):
        '''A tiny CLIP tokenizer pulled from the testing hub.'''
        __a : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def lowerCAmelCase__(self ):
        '''A seeded, 5-layer dummy CLIP text encoder with projection head.'''
        torch.manual_seed(0 )  # deterministic random weights
        __a : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_lowercase )

    @property
    def lowerCAmelCase__(self ):
        '''A seeded single-layer dummy ``PriorTransformer``.'''
        torch.manual_seed(0 )
        __a : Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        __a : Tuple = PriorTransformer(**_lowercase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __a : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def lowerCAmelCase__(self ):
        '''A seeded, 5-layer dummy CLIP vision encoder with projection head.'''
        torch.manual_seed(0 )
        __a : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __a : Optional[Any] = CLIPVisionModelWithProjection(_lowercase )
        return model

    @property
    def lowerCAmelCase__(self ):
        '''A CLIP image processor configured with the standard CLIP statistics.'''
        __a : Optional[Any] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_lowercase , do_normalize=_lowercase , do_resize=_lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor

    def lowerCAmelCase__(self ):
        '''Assemble the dummy components dict expected by the pipeline constructor.'''
        __a : Union[str, Any] = self.dummy_prior
        __a : int = self.dummy_image_encoder
        __a : Any = self.dummy_text_encoder
        __a : int = self.dummy_tokenizer
        __a : Optional[Any] = self.dummy_image_processor
        __a : List[Any] = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=10.0 , )
        __a : List[Any] = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components

    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        '''Build the standard call kwargs with a device-appropriate seeded generator.'''
        if str(_lowercase ).startswith("""mps""" ):
            # MPS does not accept device-bound generators; fall back to the global one.
            __a : Dict = torch.manual_seed(_lowercase )
        else:
            __a : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Union[str, Any] = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    def lowerCAmelCase__(self ):
        '''Smoke-test the prior pipeline on CPU against a frozen embedding slice.'''
        __a : Union[str, Any] = """cpu"""
        __a : Union[str, Any] = self.get_dummy_components()
        __a : Dict = self.pipeline_class(**_lowercase )
        __a : Tuple = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
        __a : str = output.image_embeds
        # Same call but with return_dict disabled; both outputs must agree.
        __a : Any = pipe(
            **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
        __a : List[Any] = image[0, -10:]
        __a : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __a : Optional[Any] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def lowerCAmelCase__(self ):
        '''Run the mixin's batched-vs-single consistency check (skipped on MPS).'''
        __a : Any = torch_device == """cpu"""
        __a : Any = True
        __a : Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=_lowercase , relax_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )

    @skip_mps
    def lowerCAmelCase__(self ):
        '''Run the mixin's attention-slicing equivalence check (skipped on MPS).'''
        __a : Optional[int] = torch_device == """cpu"""
        __a : Union[str, Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
| 63 | 1 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__:
    """A trie (prefix tree) node over single characters.

    Each node maps a character to a child node; ``is_leaf`` marks that the
    path from the root to this node spells a complete stored word.

    BUG FIXES vs. the original: the four methods all carried the same
    obfuscated name (shadowing each other) and attributes were assigned to
    throwaway locals; names are restored to match their call sites
    (``insert_many``/``insert``/``find``/``delete``).
    """

    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # char -> child TrieNode
        self.is_leaf = False

    def insert_many(self, words):
        """Insert every word of the iterable ``words``."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert ``word``, creating intermediate nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = SCREAMING_SNAKE_CASE__()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word):
        """Return True iff ``word`` was stored as a complete word."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word):
        """Remove ``word`` from the trie, pruning now-empty branches."""

        def _delete(curr, word, index) -> bool:
            # Returns True when `curr` became childless and may be pruned
            # by its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


# The rest of the module refers to this class as `TrieNode`; keep it bound.
TrieNode = SCREAMING_SNAKE_CASE__
def __magic_name__ ( _lowerCamelCase : TrieNode , _lowerCamelCase : str ):
if node.is_leaf:
print(_lowerCamelCase , end=""" """ )
for key, value in node.nodes.items():
print_words(_lowerCamelCase , word + key )
def __magic_name__():
    """Self-test: exercise insert/find/delete on a small word set.

    Returns True when all assertions pass.

    BUG FIX: the original read the undefined name `_lowerCamelCase` where the
    word list, the trie root, and the loop variable were meant.
    """
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("""banana""")
    assert not root.find("""bandanas""")
    assert not root.find("""apps""")
    assert root.find("""apple""")
    assert root.find("""all""")
    root.delete("""all""")
    assert not root.find("""all""")
    root.delete("""banana""")
    assert not root.find("""banana""")
    assert root.find("""bananas""")
    return True


# Call sites in this module refer to this function as `test_trie`.
test_trie = __magic_name__
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : bool ):
print(str(_lowerCamelCase ) , """works!""" if passes else """doesn't work :(""" )
def __magic_name__ ( ):
    """Fail loudly (via ``assert``) if the trie self-test does not pass.

    NOTE(review): the call target ``test_trie`` is not bound under that name
    as this module's defs were renamed — confirm the intended target.
    """
    assert test_trie()
def __magic_name__():
    """Entry point: run the trie self-test and print the result."""
    print_results("""Testing trie functionality""", test_trie())


# BUG FIX: the __main__ guard below calls `main`, which was never bound.
main = __magic_name__

if __name__ == "__main__":
    main()
| 63 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Tokenizer tests for LED (slow and fast implementations).

    NOTE(review): this class appears machine-renamed — the helper methods and
    cached properties all share the name ``lowerCAmelCase__`` (shadowing each
    other), locals are renamed to ``__a`` while later lines read the original
    names (``self.vocab_file``, ``batch``, ``tokens_r`` …), and the mixin
    base ``__snake_case`` (presumably ``TokenizerTesterMixin``) is undefined.
    The original identifiers must be restored before this can run; the
    docstrings below describe the evident intent of each member.
    """

    _lowerCAmelCase = LEDTokenizer       # slow tokenizer class under test
    _lowerCAmelCase = LEDTokenizerFast   # fast tokenizer class under test
    _lowerCAmelCase = True               # mixin flag (test_rust_tokenizer, presumably)

    def lowerCAmelCase__(self ):
        '''Write a tiny BPE vocab and merges file into the test tmpdir.'''
        super().setUp()
        __a : str = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        __a : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __a : List[Any] = {"""unk_token""": """<unk>"""}
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowercase ) )

    def lowerCAmelCase__(self , **_lowercase ):
        '''Instantiate the slow tokenizer from the tmpdir fixtures.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def lowerCAmelCase__(self , **_lowercase ):
        '''Instantiate the fast (Rust) tokenizer from the tmpdir fixtures.'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def lowerCAmelCase__(self , _lowercase ):
        '''Return a fixed (input, expected-output) text pair for the mixin.'''
        return "lower newer", "lower newer"

    @cached_property
    def lowerCAmelCase__(self ):
        '''Pretrained slow LED tokenizer used by the integration tests.'''
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )

    @cached_property
    def lowerCAmelCase__(self ):
        '''Pretrained fast LED tokenizer used by the integration tests.'''
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Batch-encode two sentences and check shapes and the expected ids.'''
        __a : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __a : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            __a : Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowercase , _lowercase )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Check the batch contains input_ids/attention_mask but no label keys.'''
        __a : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , _lowercase )
            self.assertIn("""attention_mask""" , _lowercase )
            self.assertNotIn("""labels""" , _lowercase )
            self.assertNotIn("""decoder_attention_mask""" , _lowercase )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Target texts padded to max_length must come back with width 32.'''
        __a : Optional[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Dict = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Over-long input must be truncated to the model max length (5122).'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Inputs and targets must both be wrapped in BOS/EOS special tokens.'''
        __a : Tuple = ["""A long paragraph for summarization."""]
        __a : Dict = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : int = tokenizer(_lowercase , return_tensors="""pt""" )
            __a : Dict = tokenizer(text_target=_lowercase , return_tensors="""pt""" )
            __a : List[str] = inputs["""input_ids"""]
            __a : List[Any] = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def lowerCAmelCase__(self ):
        '''`pad` must pad the LED-specific global_attention_mask with -1.'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[Any] = ["""Summary of the text.""", """Another summary."""]
            __a : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __a : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
            __a : Tuple = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
            __a : Union[str, Any] = tokenizer.pad(_lowercase )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , _lowercase )

    def lowerCAmelCase__(self ):
        '''Intentionally skipped mixin test (no pretokenized-input support).'''
        pass

    def lowerCAmelCase__(self ):
        '''Slow and fast tokenizers must agree when a <mask> token is embedded.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = """A, <mask> AllenNLP sentence."""
                __a : Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                __a : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 | 1 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Processor combining the OwlViT image processor with a CLIP tokenizer.

    NOTE(review): this class appears machine-renamed — the base
    ``__snake_case`` (presumably ``ProcessorMixin``) is undefined, several
    signatures reuse the parameter name ``_lowercase`` (a SyntaxError), and
    locals renamed to ``__a`` are read back under their original names
    (``image_processor``, ``encodings``, ``input_ids`` …).  The original
    identifiers must be restored before this can run.
    """

    _lowerCAmelCase = ["image_processor", "tokenizer"]          # attributes
    _lowerCAmelCase = "OwlViTImageProcessor"                    # image_processor_class
    _lowerCAmelCase = ("CLIPTokenizer", "CLIPTokenizerFast")    # tokenizer_class

    def __init__(self , _lowercase=None , _lowercase=None , **_lowercase ):
        '''Store the image processor and tokenizer; accept the deprecated
        ``feature_extractor`` kwarg as a fallback for the image processor.'''
        __a : Dict = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , _lowercase , )
            __a : str = kwargs.pop("""feature_extractor""" )
        __a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(_lowercase , _lowercase )

    def __call__(self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="max_length" , _lowercase="np" , **_lowercase ):
        '''Encode text queries and/or images for OwlViT.

        Text may be a single string, a flat list (one query per image), or a
        nested list (multiple queries per image, padded to the per-batch
        maximum query count).  Tokenized queries are stacked along a new
        batch axis in the requested tensor backend (np/jax/pt/tf); images and
        optional query images go through the image processor.'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                """You have to specify at least one text or query image or image. All three cannot be none.""" )
        if text is not None:
            if isinstance(_lowercase , _lowercase ) or (isinstance(_lowercase , _lowercase ) and not isinstance(text[0] , _lowercase )):
                __a : Any = [self.tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase , **_lowercase )]
            elif isinstance(_lowercase , _lowercase ) and isinstance(text[0] , _lowercase ):
                __a : Optional[Any] = []
                # Maximum number of queries across batch
                __a : str = max([len(_lowercase ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(_lowercase ) != max_num_queries:
                        __a : int = t + [""" """] * (max_num_queries - len(_lowercase ))
                    __a : Dict = self.tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase , **_lowercase )
                    encodings.append(_lowercase )
            else:
                raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
            # Stack the per-sample encodings into one batch in the chosen backend.
            if return_tensors == "np":
                __a : Union[str, Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                __a : Union[str, Any] = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                __a : Union[str, Any] = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                __a : Optional[int] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                __a : List[str] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
                __a : int = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                __a : List[str] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                __a : Dict = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("""Target return tensor type could not be returned""" )
            __a : List[str] = BatchEncoding()
            __a : Union[str, Any] = input_ids
            __a : str = attention_mask
        if query_images is not None:
            __a : int = BatchEncoding()
            __a : str = self.image_processor(
                _lowercase , return_tensors=_lowercase , **_lowercase ).pixel_values
            __a : Union[str, Any] = query_pixel_values
        if images is not None:
            __a : List[Any] = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
        # Merge pixel values into the text/query encoding when both are present.
        if text is not None and images is not None:
            __a : List[Any] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            __a : int = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )

    def lowerCAmelCase__(self , *_lowercase , **_lowercase ):
        '''Forward to the image processor's ``post_process``.'''
        return self.image_processor.post_process(*_lowercase , **_lowercase )

    def lowerCAmelCase__(self , *_lowercase , **_lowercase ):
        '''Forward to the image processor's ``post_process_object_detection``.'''
        return self.image_processor.post_process_object_detection(*_lowercase , **_lowercase )

    def lowerCAmelCase__(self , *_lowercase , **_lowercase ):
        '''Forward to the image processor's ``post_process_image_guided_detection``.'''
        return self.image_processor.post_process_image_guided_detection(*_lowercase , **_lowercase )

    def lowerCAmelCase__(self , *_lowercase , **_lowercase ):
        '''Forward to the tokenizer's ``batch_decode``.'''
        return self.tokenizer.batch_decode(*_lowercase , **_lowercase )

    def lowerCAmelCase__(self , *_lowercase , **_lowercase ):
        '''Forward to the tokenizer's ``decode``.'''
        return self.tokenizer.decode(*_lowercase , **_lowercase )

    @property
    def lowerCAmelCase__(self ):
        '''Deprecated alias for ``image_processor_class``.'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowercase , )
        return self.image_processor_class

    @property
    def lowerCAmelCase__(self ):
        '''Deprecated alias for ``image_processor``.'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowercase , )
        return self.image_processor
| 63 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
lowercase__ = parser.parse_args()
lowercase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import registry: maps each submodule to the public names it exports.
# BUG FIX: the structure dict and the torch-only name list were both bound to
# the throwaway name `lowercase__`, while `_LazyModule` below reads
# `_import_structure` (previously unbound -> NameError at import time);
# register the torch-only names inside the dict as the lazy loader expects.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is absent: expose only the config/tokenizer names.
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports mirroring the lazy structure above.
    # NOTE(review): these reference `MaMaaa*` names that do not match the
    # public `M2M100*` names in the registry — confirm against the actual
    # submodules before relying on type checking.
    from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
    from .tokenization_mam_aaa import MaMaaaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mam_aaa import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaMaaaForConditionalGeneration,
            MaMaaaModel,
            MaMaaaPreTrainedModel,
        )
else:
    import sys

    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
# BUG FIX: the base class `__snake_case` was never defined; the only base
# imported in this file is DiffusionPipeline.
class SCREAMING_SNAKE_CASE__(DiffusionPipeline):
    """Minimal one-step pipeline used to exercise custom-pipeline loading.

    Performs a single UNet + scheduler step and returns an all-ones tensor
    of the model-output shape (the subtraction below cancels the scheduler
    output on purpose, keeping the result deterministic).
    """

    def __init__(self, unet, scheduler):
        # BUG FIX: both parameters shared one name (a SyntaxError); name them
        # after the modules they register.
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run one denoising step; return ones_like(model_output)."""
        # BUG FIX: the original referenced the undefined name `_lowercase`
        # wherever a local was needed; bind and use real locals.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # scheduler_output - scheduler_output == 0, so this is ones_like(model_output).
        result = scheduler_output - scheduler_output + torch.ones_like(model_output)
        return result
| 63 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowercase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Composite configuration holding a question-encoder config and a generator
    config (mangled form of the transformers ``RagConfig``).

    NOTE(review): identifiers in this file look machine-obfuscated; methods
    reference names that are never bound, so the class is non-functional as
    written and needs its original names restored before use.
    """

    # model_type tag, then (presumably) is_composition — both bound to the
    # same mangled class attribute, so the second assignment shadows the first.
    _lowerCAmelCase = "rag"
    _lowerCAmelCase = True

    def __init__(self , _lowercase=None , _lowercase=True , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=" / " , _lowercase=" // " , _lowercase=5 , _lowercase=300 , _lowercase=768 , _lowercase=8 , _lowercase="wiki_dpr" , _lowercase="train" , _lowercase="compressed" , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=False , _lowercase=0.0 , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , _lowercase=True , _lowercase=None , **_lowercase , ):
        """Initialize the composite RAG configuration.

        NOTE(review): every parameter is named ``_lowercase`` — duplicate
        argument names are a SyntaxError in Python — and the body reads
        ``kwargs``, ``reduce_loss``, ``title_sep`` etc., none of which are
        bound here. All assignments below target the throwaway local ``__a``
        instead of attributes on ``self``.
        """
        super().__init__(
            bos_token_id=_lowercase , pad_token_id=_lowercase , eos_token_id=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , is_encoder_decoder=_lowercase , prefix=_lowercase , vocab_size=_lowercase , **_lowercase , )
        # Both sub-configs must be supplied as keyword arguments.
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        # Pop the raw sub-config dicts and their model_type tags.
        __a : Optional[int] = kwargs.pop("""question_encoder""" )
        __a : List[str] = question_encoder_config.pop("""model_type""" )
        __a : List[Any] = kwargs.pop("""generator""" )
        __a : List[Any] = decoder_config.pop("""model_type""" )
        # Imported lazily, presumably to avoid a circular import with the
        # auto-configuration module.
        from ..auto.configuration_auto import AutoConfig
        __a : Optional[Any] = AutoConfig.for_model(_lowercase , **_lowercase )
        __a : Optional[Any] = AutoConfig.for_model(_lowercase , **_lowercase )
        # RAG-specific hyperparameters (originally ``self.<name> = <name>``).
        __a : Dict = reduce_loss
        __a : Tuple = label_smoothing
        __a : List[Any] = exclude_bos_score
        __a : Optional[int] = do_marginalize
        __a : int = title_sep
        __a : Any = doc_sep
        __a : str = n_docs
        __a : int = max_combined_length
        __a : List[str] = dataset
        __a : Optional[int] = dataset_split
        __a : Optional[Any] = index_name
        __a : Tuple = retrieval_vector_size
        __a : Dict = retrieval_batch_size
        __a : Optional[int] = passages_path
        __a : Optional[Any] = index_path
        __a : int = use_dummy_dataset
        __a : List[str] = output_retrieved
        __a : List[Any] = do_deduplication
        __a : int = use_cache
        # Fall back to the generator's forced_eos_token_id when none was given.
        if self.forced_eos_token_id is None:
            __a : str = getattr(self.generator , """forced_eos_token_id""" , _lowercase )

    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase , **_lowercase ):
        """Alternate constructor from two sub-config objects (mangled
        ``from_question_encoder_generator_configs``).

        NOTE(review): duplicate ``_lowercase`` parameters (SyntaxError); the
        body expects distinct ``question_encoder_config`` / ``generator_config``
        arguments.
        """
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_lowercase )

    def lowerCAmelCase__(self ):
        """Serialize to a plain dict, inlining both sub-configs (mangled
        ``to_dict``). Rebinds the classmethod above — same mangled name.

        NOTE(review): relies on ``copy`` being imported at module level (not
        visible in this chunk) and returns ``output``, which is never bound
        here — the deepcopy result is lost in the mangled local ``__a``.
        """
        __a : Union[str, Any] = copy.deepcopy(self.__dict__ )
        __a : List[str] = self.question_encoder.to_dict()
        __a : Any = self.generator.to_dict()
        __a : Optional[Any] = self.__class__.model_type
        return output
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger, then the checkpoint->config-URL map. NOTE(review): both are
# bound to the same mangled name ``lowercase__``, so the dict shadows the logger.
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration for a ViT-MSN model (mangled ``ViTMSNConfig``)."""

    # model_type identifier used by the auto-config machinery.
    _lowerCAmelCase = "vit_msn"

    def __init__(self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1e-06 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , **_lowercase , ):
        """Store the transformer hyperparameters (defaults match ViT-base).

        NOTE(review): duplicate ``_lowercase`` parameter names (SyntaxError),
        the body reads ``hidden_size`` etc. which are never bound here, and
        every assignment targets the local ``__a`` rather than ``self``.
        """
        super().__init__(**_lowercase )
        __a : int = hidden_size
        __a : str = num_hidden_layers
        __a : str = num_attention_heads
        __a : Optional[Any] = intermediate_size
        __a : Union[str, Any] = hidden_act
        __a : Tuple = hidden_dropout_prob
        __a : Any = attention_probs_dropout_prob
        __a : List[Any] = initializer_range
        __a : Any = layer_norm_eps
        __a : Dict = image_size
        __a : List[Any] = patch_size
        __a : Dict = num_channels
        __a : Optional[Any] = qkv_bias
| 63 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# NOTE(review): every constant below rebinds the same mangled name
# ``lowercase__`` (originally the logger plus distinct VOCAB_FILES_NAMES /
# PRETRAINED_* maps); only the last binding survives at module scope.
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Hosted vocab / tokenizer files per checkpoint.
lowercase__ = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}
# Max model input size per checkpoint.
lowercase__ = {
    "yjernite/retribert-base-uncased": 512,
}
# Per-checkpoint tokenizer init kwargs.
lowercase__ = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast RetriBERT tokenizer backed by HuggingFace *tokenizers* (mangled
    ``RetriBertTokenizerFast``).

    NOTE(review): the class attributes below all rebind the same mangled name
    and reference ALL-CAPS constants that do not exist under those names in
    this mangled module (they were renamed to ``lowercase__`` above).
    """

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = RetriBertTokenizer
    _lowerCAmelCase = ["input_ids", "attention_mask"]

    def __init__(self , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase="[UNK]" , _lowercase="[SEP]" , _lowercase="[PAD]" , _lowercase="[CLS]" , _lowercase="[MASK]" , _lowercase=True , _lowercase=None , **_lowercase , ):
        """Build the tokenizer and re-sync the backend normalizer state.

        NOTE(review): duplicate ``_lowercase`` parameters (SyntaxError);
        the body reads ``do_lower_case`` / ``strip_accents`` /
        ``tokenize_chinese_chars``, none of which are bound here.
        """
        super().__init__(
            _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the caller's settings.
        __a : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
        ):
            __a : str = getattr(_lowercase , normalizer_state.pop("""type""" ) )
            __a : str = do_lower_case
            __a : str = strip_accents
            __a : List[str] = tokenize_chinese_chars
            __a : Optional[Any] = normalizer_class(**_lowercase )
        __a : List[Any] = do_lower_case

    def lowerCAmelCase__(self , _lowercase , _lowercase=None ):
        """Add BERT-style special tokens: [CLS] A [SEP] (B [SEP]) — mangled
        ``build_inputs_with_special_tokens``; reads the unbound
        ``token_ids_a`` name.
        """
        __a : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
        """Token-type ids: 0s for the first segment (+[CLS]/[SEP]), 1s for the
        second — mangled ``create_token_type_ids_from_sequences``. Rebinds the
        method above (same mangled name).
        """
        __a : Union[str, Any] = [self.sep_token_id]
        __a : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
        """Save the backend vocabulary files — mangled ``save_vocabulary``."""
        __a : List[Any] = self._tokenizer.model.save(_lowercase , name=_lowercase )
        return tuple(_lowercase )
| 63 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
# NOTE(review): every constant below rebinds the same mangled name
# ``lowercase__`` (originally the logger plus distinct CONTEXT_ENCODER_* /
# QUESTION_ENCODER_* / READER_* maps); only the last binding survives.
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Context-encoder vocab / tokenizer file URLs.
lowercase__ = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Question-encoder vocab / tokenizer file URLs.
lowercase__ = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Reader vocab / tokenizer file URLs.
lowercase__ = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Max model input sizes (context encoder, question encoder, reader).
lowercase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowercase__ = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
lowercase__ = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
# Per-checkpoint tokenizer init kwargs (context encoder, question encoder, reader).
lowercase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast DPR context-encoder tokenizer (mangled
    ``DPRContextEncoderTokenizerFast``). NOTE(review): the attributes all
    rebind the same mangled name and reference ALL-CAPS constants that this
    mangled module no longer defines.
    """
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast DPR question-encoder tokenizer (mangled
    ``DPRQuestionEncoderTokenizerFast``). NOTE(review): the attributes all
    rebind the same mangled name and reference ALL-CAPS constants that this
    mangled module no longer defines.
    """
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRQuestionEncoderTokenizer
# Result records for the DPR reader: a scored answer span, and the reader's
# raw logits. NOTE(review): both namedtuples are bound to the same mangled
# name ``lowercase__``, so the second shadows the first.
lowercase__ = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowercase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ :
    """Mixin that encodes (question, title, text) triples for the DPR reader
    and decodes its logits into answer spans (mangled
    ``CustomDPRReaderTokenizerMixin``).

    NOTE(review): signatures use duplicate ``_lowercase`` parameters
    (SyntaxError) and bodies read names that are never bound; the logic below
    documents the apparent original intent.
    """

    def __call__(self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ):
        """Encode questions with passage titles/texts into reader inputs.

        NOTE(review): body expects distinct ``questions`` / ``titles`` /
        ``texts`` / ``padding`` / ``truncation`` / ``max_length`` /
        ``return_tensors`` / ``return_attention_mask`` arguments.
        """
        # No passages: behave like a plain tokenizer call.
        if titles is None and texts is None:
            return super().__call__(
                _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
        # Only one of titles/texts given: encode it as the pair segment.
        elif titles is None or texts is None:
            __a : str = titles if texts is None else texts
            return super().__call__(
                _lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
        # Normalize scalars to lists and broadcast a single question over all
        # passages.
        __a : str = titles if not isinstance(_lowercase , _lowercase ) else [titles]
        __a : Optional[Any] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
        __a : Tuple = len(_lowercase )
        __a : Dict = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
        assert len(_lowercase ) == len(
            _lowercase ), F'''There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.'''
        # Encode "question [SEP] title" with special tokens and the passage
        # text without, then concatenate (truncating to max_length if asked).
        __a : Optional[Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
        __a : str = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
        __a : Union[str, Any] = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
            ]
        }
        # Build an attention mask that zeroes out padding positions.
        if return_attention_mask is not False:
            __a : Optional[int] = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            __a : str = attention_mask
        return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase = 16 , _lowercase = 64 , _lowercase = 4 , ):
        """Select the best answer spans across passages, ranked by passage
        relevance (mangled ``decode_best_spans``).

        NOTE(review): body expects ``reader_input`` / ``reader_output`` plus
        num_spans / max_answer_length / num_spans_per_passage parameters.
        """
        __a : Union[str, Any] = reader_input["""input_ids"""]
        __a , __a , __a : Optional[int] = reader_output[:3]
        __a : int = len(_lowercase )
        # Visit passages from most to least relevant.
        __a : Any = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
        __a : List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            __a : Optional[int] = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            __a : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            # Restrict scoring to the passage text (stop at padding, if any).
            if sequence_ids[-1] == self.pad_token_id:
                __a : int = sequence_ids.index(self.pad_token_id )
            else:
                __a : Optional[Any] = len(_lowercase )
            __a : List[Any] = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
            # Shift span indices back to full-sequence coordinates and decode.
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(_lowercase ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , ):
        """Score every candidate (start, end) span and keep the top
        non-overlapping ones (mangled ``_get_best_spans``). Rebinds the
        method above — same mangled name.
        """
        __a : Tuple = []
        for start_index, start_score in enumerate(_lowercase ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        # NOTE(review): the lambda's parameter is mangled but its body reads
        # the unbound name ``x`` — sort key is broken as written.
        __a : str = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
        __a : Union[str, Any] = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
            __a : List[str] = end_index - start_index + 1
            assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
            # Skip spans overlapping an already-chosen interval.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(_lowercase ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case ):
    """Fast DPR reader tokenizer combining the custom reader mixin with the
    BERT fast tokenizer (mangled ``DPRReaderTokenizerFast``). NOTE(review):
    attributes all rebind the same mangled name and reference ALL-CAPS
    constants that this mangled module no longer defines.
    """
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    _lowerCAmelCase = DPRReaderTokenizer
| 63 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowercase__ = input("Enter image url: ").strip()
print(f'Downloading image from {url} ...')
lowercase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowercase__ = soup.find("meta", {"property": "og:image"})["content"]
lowercase__ = requests.get(image_url).content
lowercase__ = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 63 |
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
    """Greatest product of four adjacent grid numbers in any direction
    (Project Euler 11; mangled ``largest_product``).

    NOTE(review): the body reads ``grid``/``n_rows``/``n_columns``/
    ``max_product``/``largest`` which are never bound (param is
    ``_lowerCamelCase``), all four directional products collapse into the
    same mangled local ``__a``, and ``max`` is called on the grid argument
    four times — non-functional as written.
    """
    __a : List[str] = len(grid[0] )
    __a : int = len(_lowerCamelCase )
    __a : Tuple = 0
    __a : List[Any] = 0
    __a : List[str] = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(_lowerCamelCase ):
        for j in range(n_rows - 3 ):
            # Vertical then horizontal 4-in-a-row products.
            __a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            __a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                __a : List[Any] = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                __a : List[Any] = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            __a : str = max(
                _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
            if max_product > largest:
                __a : Optional[Any] = max_product
    return largest
def __magic_name__ ( ):
    """Read ``grid.txt`` next to this module and return the largest 4-number
    product (mangled ``solution``). Rebinds the function above — same
    mangled name.

    NOTE(review): ``_lowerCamelCase``, ``grid`` and ``largest_product`` are
    all unbound here; the original presumably used ``__file__`` and called
    the grid-scanning function defined above.
    """
    __a : Tuple = []
    with open(os.path.dirname(_lowerCamelCase ) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )
    # Convert every cell from string to int.
    __a : Tuple = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
    return largest_product(_lowerCamelCase )


if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name in this file.
    print(solution())
| 63 | 1 |
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Dict ):
    """Shared assertions for a dataset read back from SQL (mangled
    ``_check_sql_dataset``).

    NOTE(review): duplicate ``_lowerCamelCase`` parameters (SyntaxError);
    the body expects distinct ``dataset`` / ``expected_features`` arguments.
    """
    assert isinstance(_lowerCamelCase , _lowerCamelCase )
    # Fixture dataset: 4 rows, 3 typed columns.
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : int ):
    """SqlDatasetReader round-trip with and without keep_in_memory, asserting
    the expected Arrow memory behavior (mangled test; duplicate
    ``_lowerCamelCase`` parameters are a SyntaxError as written).
    """
    __a : Tuple = tmp_path / """cache"""
    __a : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    # In-memory reads should allocate Arrow memory; cached reads should not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        __a : Dict = SqlDatasetReader(
            """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
    _check_sql_dataset(_lowerCamelCase , _lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] ):
    """SqlDatasetReader with an explicit ``features`` schema (mangled test;
    duplicate ``_lowerCamelCase`` parameters are a SyntaxError as written).
    """
    __a : Dict = tmp_path / """cache"""
    __a : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    # When no features are passed, fall back to the default schema.
    __a : Dict = features.copy() if features else default_expected_features
    __a : Union[str, Any] = (
        Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    __a : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
    _check_sql_dataset(_lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : List[str] ):
    """Yield every row of the ``dataset`` table in a SQLite file (mangled
    ``iter_sql_file``). NOTE(review): ``sqlitea`` is presumably the mangled
    stdlib ``sqlite3`` import.
    """
    with contextlib.closing(sqlitea.connect(_lowerCamelCase ) ) as con:
        __a : str = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def __magic_name__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
    """Read-from-SQL then write-back with num_proc=1 and compare rows
    (mangled test; duplicate ``_lowerCamelCase`` parameters are a
    SyntaxError as written).
    """
    __a : Optional[int] = tmp_path / """cache"""
    __a : Optional[Any] = os.path.join(_lowerCamelCase , """tmp.sql""" )
    __a : int = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_lowerCamelCase ).read()
    SqlDatasetWriter(_lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
    # The written database must contain exactly the original rows.
    __a : Union[str, Any] = iter_sql_file(_lowerCamelCase )
    __a : Optional[int] = iter_sql_file(_lowerCamelCase )
    for rowa, rowa in zip(_lowerCamelCase , _lowerCamelCase ):
        assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Tuple ):
    """Same round-trip as above but writing with num_proc=2 (mangled test;
    duplicate ``_lowerCamelCase`` parameters are a SyntaxError as written).
    """
    __a : Any = tmp_path / """cache"""
    __a : Any = os.path.join(_lowerCamelCase , """tmp.sql""" )
    __a : Optional[Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_lowerCamelCase ).read()
    SqlDatasetWriter(_lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
    # Multiprocess writing must preserve the row contents.
    __a : Dict = iter_sql_file(_lowerCamelCase )
    __a : Optional[Any] = iter_sql_file(_lowerCamelCase )
    for rowa, rowa in zip(_lowerCamelCase , _lowerCamelCase ):
        assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Dict ):
    """Writing with num_proc=0 must raise (mangled test; duplicate
    ``_lowerCamelCase`` parameters are a SyntaxError as written, and the
    expected exception type was mangled into the parameter name).
    """
    __a : Dict = tmp_path / """cache"""
    __a : int = os.path.join(_lowerCamelCase , """tmp.sql""" )
    __a : str = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_lowerCamelCase ).read()
    with pytest.raises(_lowerCamelCase ):
        SqlDatasetWriter(_lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Output container for the semantic Stable Diffusion pipeline (mangled).

    NOTE(review): both fields were mangled to the same name with the literal
    ``42`` standing in for their type annotations, so the second binding
    shadows the first and the dataclass has effectively one int field.
    """
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42


# The pipeline needs both transformers and torch importable at module load.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __magic_name__ ( _lowerCamelCase : Optional[int]="" ):
    """Return a unique path inside a fresh temp directory (mangled
    ``get_new_path``).

    NOTE(review): the body reads the unbound name ``suffix`` (the param is
    ``_lowerCamelCase``) and calls ``uuid.uuida`` — presumably a mangled
    ``uuid.uuid4``; non-functional as written.
    """
    __a : str = tempfile.mkdtemp()
    return os.path.join(_lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for ``AgentAudio`` (mangled). NOTE(review): both test methods
    share the same mangled name, so only the last one survives on the class;
    bodies read the unbound name ``_lowercase`` and rebind locals into
    ``__a``, and ``torch.floataa`` is presumably a mangled ``float32``.
    """

    def lowerCAmelCase__(self ):
        """Tensor -> AgentAudio: to_string() should yield a wav path whose
        contents round-trip back to the tensor, surviving object deletion.
        """
        __a : Tuple = torch.rand(12 , dtype=torch.floataa ) - 0.5
        __a : str = AgentAudio(_lowercase )
        __a : Optional[int] = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(_lowercase , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(_lowercase ) )
        # Ensure that the file contains the same value as the original tensor
        __a , __a : str = sf.read(_lowercase )
        self.assertTrue(torch.allclose(_lowercase , torch.tensor(_lowercase ) , atol=1e-4 ) )

    def lowerCAmelCase__(self ):
        """Wav file -> AgentAudio: to_raw() matches the samples and
        to_string() returns the original path.
        """
        __a : Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
        __a : Optional[int] = get_new_path(suffix=""".wav""" )
        sf.write(_lowercase , _lowercase , 16000 )
        __a : Union[str, Any] = AgentAudio(_lowercase )
        self.assertTrue(torch.allclose(_lowercase , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , _lowercase )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for ``AgentImage`` (mangled). NOTE(review): all three test
    methods share the same mangled name, so only the last survives on the
    class; bodies read the unbound name ``_lowercase``.
    """

    def lowerCAmelCase__(self ):
        """Tensor -> AgentImage: raw form is a PIL image and the serialized
        path survives object deletion.
        """
        __a : Dict = torch.randint(0 , 256 , (64, 64, 3) )
        __a : Dict = AgentImage(_lowercase )
        __a : Union[str, Any] = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(_lowercase , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(_lowercase ) )

    def lowerCAmelCase__(self ):
        """Path -> AgentImage: to_string() points at the same file and
        to_raw() equals the loaded image.
        """
        __a : List[str] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        __a : List[str] = Image.open(_lowercase )
        __a : Optional[int] = AgentImage(_lowercase )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(_lowercase ) )

    def lowerCAmelCase__(self ):
        """PIL image -> AgentImage: serialization writes a new file (not the
        original path) while to_raw() still equals the image.
        """
        __a : Tuple = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        __a : List[str] = Image.open(_lowercase )
        __a : str = AgentImage(_lowercase )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(_lowercase ) )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for ``AgentText`` (mangled): string in, same string out of
    to_string()/to_raw(). NOTE(review): the final assertEqual compares the
    unbound ``_lowercase`` with itself — vestige of mangling.
    """

    def lowerCAmelCase__(self ):
        """AgentText preserves the wrapped string in every representation."""
        __a : List[Any] = """Hey!"""
        __a : int = AgentText(_lowercase )
        self.assertEqual(_lowercase , agent_type.to_string() )
        self.assertEqual(_lowercase , agent_type.to_raw() )
        self.assertEqual(_lowercase , _lowercase )
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): mangled — the pipeline test class defined below reads this
# set under the name ``_TO_SKIP``, which no longer exists in this file.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # NOTE(review): names in this class look mechanically mangled — results are
    # bound to `__a` but read back as `_lowercase`/`text_classifier`/`outputs`,
    # the mapping attributes are all `_lowerCAmelCase` while the guards below
    # read `model_mapping`/`tf_model_mapping`, and every test method shares the
    # single name `lowerCAmelCase__` (so earlier definitions are shadowed).
    # Flagged only; code left byte-identical in this documentation pass.
    _lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    _lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Exclude model types whose inputs differ from the usual text models
    # (see the _TO_SKIP set defined at module level).
    if model_mapping is not None:
        _lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        _lowerCAmelCase = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def lowerCAmelCase__(self ):
        """Tiny random PT model: single input, batching, `top_k`, and the
        legacy `return_all_scores` flag."""
        __a : int = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
        __a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : List[str] = text_classifier("""This is great !""" , top_k=1 )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        # Legacy behavior
        __a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
        __a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ] , )

    @require_torch
    def lowerCAmelCase__(self ):
        """Tiny PT model pinned to an explicit `torch.device('cpu')`."""
        import torch

        __a : Any = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
        __a : Optional[int] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )

    @require_tf
    def lowerCAmelCase__(self ):
        """Tiny random TF model, single input."""
        __a : List[Any] = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
        __a : List[str] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )

    @slow
    @require_torch
    def lowerCAmelCase__(self ):
        """Default (full-size) PT model: polarity sanity checks on three inputs."""
        __a : Tuple = pipeline("""text-classification""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Optional[int] = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )

    @slow
    @require_tf
    def lowerCAmelCase__(self ):
        """Default (full-size) TF model: same polarity checks as the PT case."""
        __a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
        __a : str = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Tuple = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : str = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Build a pipeline plus sample inputs for the shared pipeline test mixin."""
        __a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        """Exercise single/batch inputs, `top_k=None`, dict text pairs, and the
        deprecated list-of-list text-pair format."""
        __a : List[str] = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        __a : Union[str, Any] = """HuggingFace is in"""
        __a : List[str] = text_classifier(_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        __a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
        __a : Dict = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        __a : Dict = text_classifier(_lowercase , top_k=_lowercase )
        __a : Dict = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
        __a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        __a : Any = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
        self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        __a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(_lowercase ):
            text_classifier(_lowercase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        __a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ..utils import _LazyModule

# Import structure for the lazy module: submodule name -> public names it exports.
lowercase__ = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Fix: import name matched to the structure dict above
    # ("OnnxSeq2SeqConfigWithPast", previously "OnnxSeqaSeqConfigWithPast").
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Fix: the structure dict is named `lowercase__` (the old code passed the
    # undefined `_import_structure`), and the lazy module must REPLACE this
    # module in sys.modules for lazy attribute lookup to take effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], lowercase__, module_spec=__spec__)
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the 0/1 knapsack solver ``knapsack.knapsack(cap, w, val, c)``.

    Arguments are capacity, weights, values, item count; the result is the
    maximum total value that fits in the capacity.

    Fix: the original referenced the undefined name ``_lowercase`` everywhere
    (locals were mangled to ``__a``), and all three test methods shared the
    name ``lowerCAmelCase__`` so only the last one survived class creation.
    """

    def test_base_case(self ):
        """Zero capacity yields 0, regardless of the items on offer."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

    def test_easy_case(self ):
        """Capacity 3 with weights [3,2,1]/values [1,2,3]: best is 2+3=5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )

    def test_knapsack(self ):
        """Classic textbook instance: expected optimum is 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )


if __name__ == "__main__":
    unittest.main()
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if `n` (via its str() form) reads the same both ways."""
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    """Project Euler 36: sum of all numbers below `limit` that are palindromic
    in both base 10 and base 2.

    Fix: both functions here were named ``__magic_name__`` (the second
    shadowed the first), while the bodies called ``is_palindrome`` and the
    main guard called ``solution`` — both undefined. Real names restored.

    >>> solution(100)
    157
    """
    total = 0
    for i in range(1, limit):
        # bin(i) looks like '0b101'; strip the '0b' prefix before checking.
        if is_palindrome(i) and is_palindrome(bin(i).split("""b""" )[1] ):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
| 700 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    def lowerCAmelCase__(self ):
        """Manim scene: visualizes checkpoint weights being staged through
        CPU/GPU memory cells and offloaded to disk-backed np.memmaps, then
        garbage-collected.

        NOTE(review): many names read below (`cpu_left_col_base`,
        `cpu_right_col_base`, `model_cpu_arr`, `ckpt_arr`, `ckpt_cpu_arr`,
        `disk_left_col_base`, `animations`, `config_and_inputs`-style targets
        of the `__a` assignments, plus manim constants like DOWN/LEFT/YELLOW/
        BLUE) appear mechanically renamed and several are undefined as
        written — TODO confirm against the original scene. Code left
        byte-identical in this documentation pass.
        """
        # Basic building blocks: a memory cell, its smaller "meta" variant,
        # and the fill rectangle used to paint a cell.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two rows of six cells with a label.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU: a single row of four cells.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model: a row of six cells.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Paint each model cell and position its fill target near the CPU cells.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # Loaded checkpoint: another row of six cells.
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        # Mirror each checkpoint cell onto a CPU cell (left column first).
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend explaining the cell colors.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Step 1 caption plus the disk target (two rows of small meta cells).
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        # Animate each checkpoint cell shrinking onto a disk cell.
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Step 2 caption: the checkpoint is garbage-collected.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Canonical config.json URLs for the released BERT checkpoints.
# NOTE(review): both the logger above and this map are bound to the same name
# `lowercase__`, so the logger is immediately overwritten — looks like a
# mechanical rename; flagged, not changed, in this documentation pass.
lowercase__ = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class SCREAMING_SNAKE_CASE__ ( _snake_case ):
    """BERT model configuration (mirrors `transformers.BertConfig`).

    All defaults correspond to the `bert-base-uncased` architecture.

    Fix: the original `__init__` declared every parameter with the same name
    ``_lowercase`` (a SyntaxError) and referenced undefined names
    (``lowerCAmelCase__``, ``classifier_dropout``, bare assignment targets
    instead of ``self.``). Parameter names restored from the body's own
    right-hand sides and the standard BertConfig interface.
    """

    _lowerCAmelCase = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _snake_case ):
    """ONNX export configuration for BERT (mirrors `transformers.BertOnnxConfig`)."""

    @property
    def lowerCAmelCase__(self ):
        """Return the ONNX input spec: each input tensor name mapped to its
        dynamic axes ({axis index: axis name}).

        Fix: the original assigned the axes dict to the mangled name ``__a``
        but then read the undefined name ``dynamic_axis``; the local is
        restored.
        """
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra `choice` dimension.
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 701 |
"""simple docstring"""
# Fix: all four functions below were named `__magic_name__` (each shadowing
# the previous), with every parameter named `_lowerCamelCase` (a SyntaxError)
# while the bodies referenced the real names. Canonical names restored.
# R = 0.0821 L*atm/(mol*K) is the ideal-gas constant used throughout.


def molarity_to_normality(nfactor: int, moles: float, volume: float) -> int:
    """Convert molarity to normality: N = (moles / volume) * n-factor.

    >>> molarity_to_normality(2, 3.1, 0.31)
    20
    """
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> int:
    """Ideal-gas pressure (atm) from volume (L), moles, and temperature (K):
    P = nRT / V.

    >>> moles_to_pressure(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> int:
    """Ideal-gas volume (L): V = nRT / P.

    >>> moles_to_volume(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> int:
    """Ideal-gas temperature (K): T = PV / (nR).

    >>> pressure_and_volume_to_temperature(0.82, 1, 2)
    20
    """
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: the logger and the expected-missing-keys list were both bound to the
# same mangled name `lowercase__` (the second overwriting the first), while
# the functions below read `logger` and `EXPECTED_MISSING_KEYS`.
logger = logging.get_logger(__name__)
# Decoder keys we expect load_state_dict(strict=False) to report as missing.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """Translate a fairseq MusicGen decoder weight name into the transformers
    `MusicgenForCausalLM` naming scheme.

    Substring replacements are applied in sequence, so one name may be
    rewritten several times (e.g. "transformer...linear1" ->
    "model.decoder...fc1").

    Fix: the parameter was mangled to ``_lowerCamelCase`` while the body read
    the undefined name ``name``; also restored ``name = name.replace(...)``
    rebinding (the originals assigned to the throwaway ``__a``).
    """
    if "emb" in name:
        name = name.replace("""emb""" , """model.decoder.embed_tokens""" )
    if "transformer" in name:
        name = name.replace("""transformer""" , """model.decoder""" )
    if "cross_attention" in name:
        name = name.replace("""cross_attention""" , """encoder_attn""" )
    if "linear1" in name:
        name = name.replace("""linear1""" , """fc1""" )
    if "linear2" in name:
        name = name.replace("""linear2""" , """fc2""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """self_attn_layer_norm""" )
    if "norm_cross" in name:
        name = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """final_layer_norm""" )
    if "out_norm" in name:
        name = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
    if "linears" in name:
        name = name.replace("""linears""" , """lm_heads""" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename fairseq MusicGen decoder weights and split them into
    (decoder state dict, enc-to-dec projection state dict).

    Fused attention projections ("in_proj_weight") are split row-wise into the
    separate q/k/v projection weights expected by transformers;
    "enc_to_dec_proj.*" entries are routed to the second dict (with the
    prefix stripped).

    Fix: the original declared both parameters as ``_lowerCamelCase``
    (a SyntaxError) and lost the dictionary-assignment targets (everything
    was bound to ``__a``); reconstructed from the references in the body.
    """
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj into three equal row blocks
            state_dict[key.replace("""in_proj_weight""" , """q_proj.weight""" )] = val[:hidden_size, :]
            state_dict[key.replace("""in_proj_weight""" , """k_proj.weight""" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("""in_proj_weight""" , """v_proj.weight""" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("""enc_to_dec_proj.""" ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> "MusicgenDecoderConfig":
    """Build the `MusicgenDecoderConfig` matching a named checkpoint size.

    Fix: the parameter was mangled to ``_lowerCamelCase`` while the body read
    ``checkpoint``, and the hidden-size/layer/head locals were bound to the
    throwaway ``__a``; names restored.

    Raises:
        ValueError: if `checkpoint` is not one of 'small', 'medium', 'large'.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1_0_2_4
        num_hidden_layers = 2_4
        num_attention_heads = 1_6
    elif checkpoint == "medium":
        hidden_size = 1_5_3_6
        num_hidden_layers = 4_8
        num_attention_heads = 2_4
    elif checkpoint == "large":
        hidden_size = 2_0_4_8
        num_hidden_layers = 4_8
        num_attention_heads = 3_2
    else:
        raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint ('small'/'medium'/'large') into a
    transformers `MusicgenForConditionalGeneration`, sanity-check one forward
    pass, and optionally save to `pytorch_dump_folder` / push to `repo_id`.

    Fix: the original signature declared every parameter as ``_lowerCamelCase``
    (a SyntaxError) and every assignment target was mangled to ``__a`` while
    later lines read the real names (``fairseq_model``, ``decoder_config``,
    ``model``, ``processor``, ...); targets reconstructed from those reads.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("""t5-base""" )
    audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(unexpected_keys ) > 0:
        raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2_0_4_8):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("""t5-base""" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_0_4_8
    model.generation_config.pad_token_id = 2_0_4_8
    # set other default generation config params
    model.generation_config.max_length = int(3_0 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    args = parser.parse_args()
    # Fixes: the parser/args were bound to the mangled name `lowercase__`
    # (leaving `parser` and `args` undefined), and the parsed `--device`
    # option was never forwarded to the conversion.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 702 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE__ :
    """Helper that builds tiny ConvBERT configs/inputs and runs shape checks
    for every TF ConvBERT head.

    NOTE(review): names look mechanically mangled — the constructor's
    parameters are all called ``_lowercase`` (duplicate parameter names are a
    SyntaxError as written) and are then ignored in favor of hard-coded
    values, model/config arguments are passed as the undefined name ``A_``,
    and the tuple destructure at the bottom reads the undefined
    ``config_and_inputs``. Flagged only; code left byte-identical.
    """

    def __init__(self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ):
        # All test dimensions are hard-coded tiny values (batch 13, seq 7,
        # hidden 384, 2 layers, 4 heads, ...) regardless of the arguments.
        __a : Any = parent
        __a : Union[str, Any] = 13
        __a : Optional[int] = 7
        __a : Optional[int] = True
        __a : Dict = True
        __a : Optional[Any] = True
        __a : Optional[Any] = True
        __a : Union[str, Any] = 99
        __a : str = 384
        __a : Optional[Any] = 2
        __a : str = 4
        __a : List[str] = 37
        __a : Optional[Any] = """gelu"""
        __a : str = 0.1
        __a : Union[str, Any] = 0.1
        __a : List[Any] = 512
        __a : List[Any] = 16
        __a : Dict = 2
        __a : Dict = 0.02
        __a : str = 3
        __a : Any = 4
        __a : str = 128
        __a : str = 2
        __a : int = 9
        __a : Union[str, Any] = 1
        __a : Optional[int] = None

    def lowerCAmelCase__(self ):
        """Build random input ids/masks/labels and a tiny ConvBertConfig."""
        __a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a : Tuple = None
        if self.use_input_mask:
            __a : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        __a : str = None
        if self.use_token_type_ids:
            __a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __a : Tuple = None
        __a : Tuple = None
        __a : Optional[int] = None
        if self.use_labels:
            __a : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        __a : Tuple = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Base model: output shape (batch, seq, hidden); also checks the
        list-style input format."""
        __a : Union[str, Any] = TFConvBertModel(config=A_ )
        __a : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __a : int = [input_ids, input_mask]
        __a : Tuple = model(A_ )
        __a : str = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Masked-LM head: logits shape (batch, seq, vocab)."""
        __a : Optional[int] = TFConvBertForMaskedLM(config=A_ )
        __a : str = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __a : int = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Sequence-classification head: logits shape (batch, num_labels)."""
        __a : Any = self.num_labels
        __a : int = TFConvBertForSequenceClassification(config=A_ )
        __a : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __a : str = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Multiple-choice head: inputs tiled per choice, logits shape
        (batch, num_choices)."""
        __a : Optional[Any] = self.num_choices
        __a : Union[str, Any] = TFConvBertForMultipleChoice(config=A_ )
        __a : Union[str, Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        __a : Optional[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        __a : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        __a : Any = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        __a : int = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Token-classification head: logits shape (batch, seq, num_labels)."""
        __a : List[str] = self.num_labels
        __a : List[str] = TFConvBertForTokenClassification(config=A_ )
        __a : str = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __a : Optional[int] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Question-answering head: start/end logits each (batch, seq)."""
        __a : Tuple = TFConvBertForQuestionAnswering(config=A_ )
        __a : str = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __a : str = model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase__(self ):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)
        for the common test mixin.

        NOTE(review): the annotated parenthesized destructure below is not
        valid Python as written, and `config_and_inputs` is undefined.
        """
        __a : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) ,
        ) : List[str] = config_and_inputs
        __a : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Common + pipeline test suite for the TF ConvBERT model classes.

    NOTE(review): in the original, every method shared one placeholder name (so
    later defs shadowed earlier ones and unittest could discover none of them),
    the class attributes were collapsed onto a single name, and bodies
    referenced the undefined name ``A_``.  Names restored from the values each
    body actually uses.  The two mixin base-class placeholders are kept as-is
    because their imports are outside this view.
    """

    # Model classes exercised by the common tests; empty when TF is not installed.
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task name -> model class mapping used by the pipeline tests.
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        # config_class: presumably ConvBertConfig, imported at the file top (outside this view) — TODO confirm.
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Save each model in TF SavedModel format, reload it, and check outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                # Reloading must not change the number of outputs.
                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # The "/ 2" mirrors ConvBERT's grouped attention halving the head count.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attention tensors are emitted with the right count and shape."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration test against the released YituTech/conv-bert-base checkpoint.

    NOTE(review): the original body referenced the undefined name ``A_``;
    locals restored from the values actually produced/consumed.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Golden values recorded from the reference checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 703 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply a zero-mean Gaussian (with the given variance) elementwise to `img`.

    NOTE(review): the original reused one placeholder name for both parameters
    (a SyntaxError); names restored from the uses.  The function name is
    restored from the call sites at get_gauss_kernel and bilateral_filter.
    """
    sigma = math.sqrt(variance)
    # Normalization constant of the 1-D Gaussian density.
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the kernel_size x kernel_size window of `img` centred at (x, y).

    NOTE(review): parameter names restored — the original collapsed all four
    onto one placeholder (a SyntaxError); the name `get_slice` is restored
    from the call site in bilateral_filter.
    """
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a spatial Gaussian kernel of the given dimension.

    NOTE(review): the original collapsed both parameters onto one placeholder
    (a SyntaxError) and assigned each distance to a throwaway local instead of
    storing it into the kernel matrix; the element store is restored.
    """
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            # Euclidean distance of (i, j) from the kernel centre.
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Bilateral-filter `img`: weight each window by spatial AND intensity Gaussians.

    NOTE(review): parameter and local names restored — the original collapsed
    all parameters onto one placeholder (a SyntaxError); the name
    `bilateral_filter` comes from the call site in the __main__ block.
    Border pixels (within kernel_size // 2 of an edge) are left at zero.
    """
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the window centre.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    """Parse positional CLI args: filename, spatial variance, intensity variance, kernel size.

    Missing arguments fall back to defaults; an even kernel size is bumped to
    the next odd number so the kernel has a well-defined centre.
    The function name is restored from the call site in the __main__ block.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Force an odd kernel size (even -> +1, odd unchanged).
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # NOTE(review): the original unpacked into one repeated throwaway name and
    # then read `filename`/`img`/`out`; targets restored.  `np.uinta` does not
    # exist in numpy — fixed to `np.uint8`.  The `cva` module is imported at the
    # file top; presumably OpenCV (cv2) under an aliased name — TODO confirm.
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    # Work in float [0, 1], filter, then rescale back to 8-bit for display.
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
# Bound as `logger` because extract_warnings_from_single_artifact calls logger.warning(...).
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract the warnings that match `targets` from one downloaded artifact.

    `artifact_path` is either a directory containing `warnings.txt` (GitHub
    Actions layout, selected by the module-global `from_gh`) or a .zip archive
    containing it.  Returns the set of selected warning texts.

    NOTE(review): the body referenced the undefined name ``_lowercase``; names
    restored from the values actually used.  `from_gh` is presumably the
    module-level flag set in the __main__ block — TODO confirm.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings matching `targets` from every artifact in `artifact_dir`.

    Scans .zip files (or, when the module-global `from_gh` is set, plain
    directories) and unions the per-artifact results.
    NOTE(review): the body referenced the undefined name ``_lowercase``; names
    restored from the values actually used.
    """
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    # NOTE(review): the inner helper was defined under a placeholder name while
    # argparse used `type=list_str`, and the CLI results were bound to one
    # repeated throwaway name; the names the code actually reads are restored.

    def list_str(values):
        """Split a comma-separated CLI value into a list of strings."""
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 704 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Build a tiny three-file dataset fixture for the dedup tests.

    NOTE(review): the original passed the undefined name ``_lowercase`` to
    `Dataset.from_dict`; the local dict is now passed.  The function name is
    restored from the `get_dataset()` call sites in the test class below.
    """
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        # Two near-duplicate "a "-files plus one distinct "b "-file.
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class SCREAMING_SNAKE_CASE__(TestCase):
    """Unit tests for the MinHash deduplication helpers.

    NOTE(review): both methods shared one placeholder name (so unittest could
    only ever see the last one) and bodies referenced the undefined name
    ``_lowercase``; real test names and locals restored.  The base class is
    `TestCase`, imported at the file top.
    """

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        # The two "a "-repetition files should land in one cluster of size 2.
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        # presumably the representative element is not an "extreme" — TODO confirm against upstream.
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], False)
| 63 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> public names it exports.
# NOTE(review): the dict was bound to a throwaway name while `_import_structure`
# was read at the bottom, the torch-only list never reached the dict, and the
# _LazyModule instance was never installed — all three restored.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; skip registering it when torch is absent.
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import structure: submodule name -> public names it exports.
# NOTE(review): same repairs as the sibling __init__ files — bind the dict as
# `_import_structure`, register the torch-only modeling list into it, and
# install the _LazyModule proxy into sys.modules.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; skip registering it when torch is absent.
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): these constants were all bound to one throwaway name while the
# tokenizer class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — bindings restored.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class SCREAMING_SNAKE_CASE__(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT.

    NOTE(review): the original ``__init__`` reused one placeholder name for
    every parameter (a SyntaxError) and method bodies referenced the undefined
    name ``UpperCAmelCase_``; parameter names are restored from the keyword
    arguments passed to ``super().__init__`` and method names from the
    PreTrainedTokenizer hooks the bodies implement.  The base class is
    PreTrainedTokenizer, imported at the file top.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the underlying SentencePiece model.
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize a string into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join SentencePiece pieces back into a plain string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 706 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module logger (bound under the conventional name instead of a throwaway one).
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the supported learning-rate schedules.

    NOTE(review): the members were collapsed onto one placeholder attribute;
    member and class names restored from the ``SchedulerType.X`` references in
    the TYPE_TO_SCHEDULER_FUNCTION dict and get_scheduler below.  Base class
    restored to ``Enum`` (imported at the file top).
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Schedule with a constant learning rate (multiplier is always 1).

    NOTE(review): the original reused one placeholder name for both parameters
    (a SyntaxError); the function name is restored from the
    TYPE_TO_SCHEDULER_FUNCTION mapping below.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant LR after a linear warmup from 0 over `num_warmup_steps` steps.

    NOTE(review): parameter names restored — the original collapsed all three
    onto one placeholder (a SyntaxError).
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear ramp 0 -> 1; max(1.0, ...) guards num_warmup_steps == 0.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR from a rule string like ``"10:0.1,20:0.01,0.005"``.

    Each ``steps:multiplier`` entry applies until `steps`; the trailing bare
    number is the multiplier used afterwards.
    NOTE(review): names restored — the original collapsed parameters onto one
    placeholder and lost the rule/step locals to repeated throwaway names.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past every boundary: use the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0 at `num_training_steps`.

    NOTE(review): parameter names restored from the keyword arguments
    get_scheduler passes to this factory.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay, clamped at 0 once training steps are exhausted.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay; `num_cycles` = 0.5 gives a half-cosine to 0.

    NOTE(review): parameter names restored — the original collapsed them onto
    one placeholder (a SyntaxError).
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the post-warmup phase already completed, in [0, 1].
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then `num_cycles` cosine decays each restarting at the base LR.

    NOTE(review): parameter names restored — the original collapsed them onto
    one placeholder (a SyntaxError).
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the start of each cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's base LR to `lr_end`.

    Raises ValueError if `lr_end` is not strictly below the optimizer's initial LR.
    NOTE(review): parameter names restored from the keyword arguments
    get_scheduler passes to this factory.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Map each SchedulerType member to its factory; get_scheduler reads this as
# TYPE_TO_SCHEDULER_FUNCTION (the original bound it to a throwaway name).
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified scheduler factory: dispatch on `name` and forward only the args it needs.

    Raises ValueError when a schedule requires `num_warmup_steps` or
    `num_training_steps` and the caller did not provide them.
    NOTE(review): parameter names restored — the original collapsed all eight
    onto one placeholder (a SyntaxError).
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 63 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bound as `logger` because the EsmConfig class below calls logger.info/warning.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for ESM models (optionally wrapping an ESMFold config).

    NOTE(review): the original ``__init__`` reused one placeholder name for all
    parameters (a SyntaxError) and the body referenced the undefined name
    ``_lowercase``; names restored from the values each line actually consumes.
    Base class restored to PretrainedConfig (imported at the file top);
    ``model_type`` and ``to_dict`` use the hook names the PretrainedConfig
    machinery expects.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                # get_default_vocab_list: presumably defined elsewhere in this module — TODO confirm.
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested ESMFold config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """ESMFold-specific settings nested inside EsmConfig.

    NOTE(review): the original collapsed every field onto one placeholder
    attribute (destroying the dataclass) and used a placeholder method name
    where the ``__post_init__`` hook is required; field names restored from
    the upstream default values.  The class name is restored from the
    ``EsmFoldConfig`` references in EsmConfig above.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Normalize `trunk` to a TrunkConfig (default, or built from a dict).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """ESMFold trunk hyper-parameters with validation in ``__post_init__``.

    NOTE(review): the original collapsed every field onto one placeholder
    attribute (destroying the dataclass) and used placeholder method names
    where ``__post_init__``/``to_dict`` are required; names restored from the
    upstream defaults and the ``TrunkConfig`` references in EsmFoldConfig.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Normalize `structure_module` (default, or built from a dict).
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): x % x is always 0, so these two checks can never fire —
        # presumably they were meant to divide by the head widths; kept as-is
        # because the divisibility is enforced by the head-count checks below.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class SCREAMING_SNAKE_CASE__ :
    """Structure-module hyper-parameters (obfuscated ``StructureModuleConfig``).

    NOTE(review): the un-annotated, repeated ``_lowerCAmelCase`` assignments
    below are NOT dataclass fields (fields require annotations); only the
    last binding (1E5) survives, so ``asdict(self)`` below returns an empty
    dict. The original field names were lost — confirm against upstream.
    """
    _lowerCAmelCase = 3_8_4
    _lowerCAmelCase = 1_2_8
    _lowerCAmelCase = 1_6
    _lowerCAmelCase = 1_2_8
    _lowerCAmelCase = 1_2
    _lowerCAmelCase = 4
    _lowerCAmelCase = 8
    _lowerCAmelCase = 0.1
    _lowerCAmelCase = 8
    _lowerCAmelCase = 1
    _lowerCAmelCase = 2
    _lowerCAmelCase = 7
    _lowerCAmelCase = 1_0
    _lowerCAmelCase = 1E-8
    _lowerCAmelCase = 1E5
    def lowerCAmelCase__(self ):
        """Serialize this config to a plain dict via ``dataclasses.asdict``."""
        return asdict(self )
def __magic_name__ ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 707 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__ ( config_path , display=False ):
    """Load an OmegaConf YAML config from *config_path*.

    Bug fix: both parameters previously shared the name `_lowerCamelCase`,
    which is a SyntaxError (duplicate argument names).

    Args:
        config_path: path to the YAML file.
        display: when True, pretty-print the loaded config.
    """
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def __magic_name__ ( device , conf_path=None , ckpt_path=None ):
    """Build a ``VQModel`` from a YAML config and checkpoint and move it to *device*.

    Bug fix: all three parameters previously shared the name `_lowerCamelCase`
    (a SyntaxError) and the body referenced undefined locals; names restored
    from the surviving right-hand sides (`conf_path`, `ckpt_path`, `sd`).
    """
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    # NOTE(review): `load_config` is defined above under the obfuscated name
    # `__magic_name__` — confirm the intended module-level name.
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    # presumably strict=False to tolerate missing/extra keys — obfuscated; confirm
    model.load_state_dict(sd , strict=False )
    model.to(device )
    del sd
    return model
def __magic_name__ ( x , model ):
    """Round-trip *x* through the VQGAN: encode, report latent shape, decode.

    Bug fix: both parameters previously shared one name (SyntaxError).
    """
    z , emb_loss , info = model.encode(x )
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
    xrec = model.decode(z )
    return xrec
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=False ):
__a , __a : Optional[Any] = string.rsplit(""".""" , 1 )
if reload:
__a : Optional[Any] = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def __magic_name__ ( _lowerCamelCase : Any ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def __magic_name__ ( config , sd , gpu=True , eval_mode=True ):
    """Instantiate the configured model, optionally loading weights, moving
    it to GPU and switching it to eval mode.

    Bug fix: all four parameters previously shared one name (SyntaxError);
    names restored from the body's surviving references (`sd`, `gpu`,
    `eval_mode`).

    Returns:
        dict: ``{"model": model}``.
    """
    # NOTE(review): `instantiate_from_config` is defined above under the
    # obfuscated name `__magic_name__` — confirm the intended name.
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def __magic_name__ ( config , ckpt , gpu , eval_mode ):
    """Load a model (and its training global step) from an optional checkpoint.

    Bug fix: all four parameters previously shared one name (SyntaxError);
    names restored from the body's surviving references (`ckpt`, `pl_sd`,
    `global_step`).

    Returns:
        tuple: ``(model, global_step)`` — global_step is None without a checkpoint.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="""cpu""" )
        global_step = pl_sd["""global_step"""]
        print(F'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    # NOTE(review): `load_model_from_config` is defined above under the
    # obfuscated name `__magic_name__` — confirm the intended name.
    model = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=gpu , eval_mode=eval_mode )["""model"""]
    return model, global_step
| 63 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow LXMERT checkpoint to a PyTorch state dict on disk.

    Bug fixes: all three parameters previously shared one name (a
    SyntaxError) and the body referenced an undefined `__A`; names restored
    from the CLI call below (tf_checkpoint_path, config_file,
    pytorch_dump_path).
    NOTE(review): the __main__ block calls `convert_tf_checkpoint_to_pytorch`,
    which does not match this obfuscated function name — confirm upstream.
    """
    config = LxmertConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point for the TF -> PyTorch LXMERT conversion.
    # NOTE(review): the parser is bound to `lowercase__` but used below as
    # `parser`, and the parsed namespace is rebound to `lowercase__` yet read
    # as `args` — names were mangled by obfuscation; confirm against the
    # upstream conversion script. `convert_tf_checkpoint_to_pytorch` also
    # does not match the obfuscated function name defined above.
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    lowercase__ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the LLaMA package: submodules are imported only
# when first accessed (or eagerly under TYPE_CHECKING for static analysis).
# Bug fixes: the optional-dependency branches previously rebound this dict's
# name to a bare list (losing earlier entries), and the final _LazyModule
# call referenced an undefined `_import_structure` and discarded the proxy
# instead of installing it in sys.modules.
lowercase__ = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that resolves the table above on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], lowercase__, module_spec=__spec__)
| 63 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# NOTE(review): all module constants below are bound to the same name
# `lowercase__` (obfuscation artifact) while the class body references them
# as VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — confirm the upstream names.
# Single vocabulary file this tokenizer ships with.
lowercase__ = {'vocab_file': 'vocab.txt'}
# Download URLs for the known pretrained ESM-2 checkpoints.
lowercase__ = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
lowercase__ = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
def __magic_name__ ( _lowerCamelCase : List[str] ):
with open(__lowerCAmelCase , """r""" ) as f:
__a : Optional[int] = f.read().splitlines()
return [l.strip() for l in lines]
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Character-level ESM tokenizer over a fixed vocabulary file.

    Bug fixes: every ``__init__`` parameter previously shared the name
    ``_lowercase`` (a SyntaxError), the bodies referenced the undefined
    ``_lowerCAmelCase``, and the instance attributes read by the other
    methods (``all_tokens``, ``_id_to_token``, ``_token_to_id``, ...) were
    never assigned; restored from the surviving right-hand sides.
    NOTE(review): several methods still share the obfuscated name
    ``lowerCAmelCase__`` (later definitions shadow earlier ones) — the
    original method names were lost; confirm against upstream.
    """

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = ["input_ids", "attention_mask"]

    def __init__(self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs ):
        super().__init__(**kwargs )
        # NOTE(review): `load_vocab_file` is defined above under the
        # obfuscated name `__magic_name__` — confirm the intended name.
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def lowerCAmelCase__(self , index ):
        """Map a vocabulary id to its token (unk token on miss)."""
        return self._id_to_token.get(index , self.unk_token )

    def lowerCAmelCase__(self , token ):
        """Map a token to its vocabulary id (unk id on miss)."""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def lowerCAmelCase__(self , text , **kwargs ):
        """Tokenize by plain whitespace splitting."""
        return text.split()

    def lowerCAmelCase__(self , with_added_tokens=False ):
        """Return the base vocabulary size."""
        return len(self._id_to_token )

    def lowerCAmelCase__(self ):
        """Return the token -> id mapping."""
        return {token: i for i, token in enumerate(self.all_tokens )}

    def lowerCAmelCase__(self , token ):
        """Token -> id (unk id on miss)."""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def lowerCAmelCase__(self , index ):
        """Id -> token (unk token on miss)."""
        return self._id_to_token.get(index , self.unk_token )

    def lowerCAmelCase__(self , token_ids_a , token_ids_b = None ):
        """Wrap one or two sequences with <cls>/<eos> special tokens."""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_b is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
        return cls + token_ids_a + sep + token_ids_b + sep  # Multiple inputs always have an EOS token

    def lowerCAmelCase__(self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        """Return a mask with 1 at special-token positions."""
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            mask += [0] * len(token_ids_b ) + [1]
        return mask

    def lowerCAmelCase__(self , save_directory , filename_prefix = None ):
        """Write the vocabulary to `<prefix->vocab.txt` inside *save_directory*."""
        vocab_file = os.path.join(save_directory , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
        with open(vocab_file , """w""" ) as f:
            f.write("""\n""".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def lowerCAmelCase__(self ):
        """Vocabulary size excluding added tokens."""
        # NOTE(review): `get_vocab_size` does not exist under the mangled
        # method names above — confirm the upstream method name.
        return self.get_vocab_size(with_added_tokens=False )

    def lowerCAmelCase__(self , new_tokens , special_tokens = False ):
        """Delegate token addition to the base class."""
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
| 709 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Map of pretrained UniSpeech checkpoints to their hosted config files.
# NOTE(review): rebinding `lowercase__` clobbers the logger above — the
# original constant name (UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP upstream)
# was lost in obfuscation.
lowercase__ = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration class for UniSpeech models.

    Bug fixes: every ``__init__`` parameter previously shared the name
    ``_lowercase`` — including ``**_lowercase`` — which is a SyntaxError,
    and all instance attributes read elsewhere (e.g. ``self.conv_stride``
    in the property below) were bound to throwaway locals. Parameter names
    are restored from the right-hand sides of the original assignments;
    defaults are unchanged.
    """

    _lowerCAmelCase = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self ):
        """Total stride of the convolutional feature extractor (product of conv strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 63 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# NOTE(review): the constants below all rebind `lowercase__` (obfuscation
# artifact) while the class body references VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES —
# confirm the upstream names.
# Files a BART tokenizer ships with: the id map and the BPE merge list.
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
# Download URLs for the known pretrained BART checkpoints.
lowercase__ = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
lowercase__ = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def __magic_name__ ( ):
__a : Dict = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__a : Optional[Any] = bs[:]
__a : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
__a : Dict = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def __magic_name__ ( _lowerCamelCase : Tuple ):
__a : Dict = set()
__a : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__a : Optional[Any] = char
return pairs
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Byte-level BPE tokenizer for BART (GPT-2 style).

    Bug fixes: the original ``__init__`` parameters all shared the name
    ``_lowercase`` (a SyntaxError), method bodies referenced the undefined
    ``UpperCamelCase_``, the instance attributes the methods rely on
    (``encoder``, ``decoder``, ``byte_encoder``, ``bpe_ranks``, ``cache``,
    ...) were never assigned, the merge-save sort key read an undefined
    ``kv``, and the super call in get_special_tokens_mask passed the same
    keyword twice. Restored from the surviving structure; the obfuscated
    method names themselves (all ``lowerCAmelCase__``) are left as-is.
    NOTE(review): `bytes_to_unicode` / `get_pairs` are defined above under
    the obfuscated name `__magic_name__` — confirm the intended names.
    """

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    @property
    def lowerCAmelCase__(self ):
        """Base vocabulary size."""
        return len(self.encoder )

    def lowerCAmelCase__(self ):
        """Return the full vocabulary, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowerCAmelCase__(self , token ):
        """Apply byte-pair encoding to one pre-token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word

    def lowerCAmelCase__(self , text ):
        """Split *text* with the GPT-2 regex and BPE-encode each piece."""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens

    def lowerCAmelCase__(self , token ):
        """Token -> id (unk id on miss)."""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def lowerCAmelCase__(self , index ):
        """Id -> token."""
        return self.decoder.get(index )

    def lowerCAmelCase__(self , tokens ):
        """Join tokens and undo the byte-level encoding."""
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text

    def lowerCAmelCase__(self , save_directory , filename_prefix = None ):
        """Write vocab.json and merges.txt into *save_directory*."""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            # Bug fix: the sort key previously read an undefined name `kv`.
            for bpe_tokens , token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file

    def lowerCAmelCase__(self , token_ids_a , token_ids_b = None ):
        """Add BART special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def lowerCAmelCase__(self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        """Return a mask with 1 at special-token positions."""
        if already_has_special_tokens:
            # Bug fix: previously passed the same keyword twice (SyntaxError).
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]

    def lowerCAmelCase__(self , token_ids_a , token_ids_b = None ):
        """Token type ids — all zeros for BART."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]

    def lowerCAmelCase__(self , text , is_split_into_words=False , **kwargs ):
        """Optionally prepend a space before tokenization."""
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
| 710 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Test helper that builds tiny EfficientFormer configs and inputs
    (obfuscated TFEfficientFormerModelTester).

    NOTE(review): every __init__ parameter below shares the name
    `_lowercase`, which is a SyntaxError (duplicate argument names), and
    the assignments reference the original (lost) parameter names, while
    several locals are bound to the throwaway `__a` yet read back under
    their original names — restore from the upstream tester before running.
    """
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Store the tester hyper-parameters on the instance."""
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        """Create a random pixel-value batch, optional labels, and a config."""
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        # NOTE(review): `config`, `pixel_values` and `labels` were bound to
        # `__a` above — names mangled by obfuscation.
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        """Build an EfficientFormerConfig from the stored hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Check the base model's output shape on a forward pass."""
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        # NOTE(review): `model` / `result` were bound to `__a` above —
        # names mangled by obfuscation.
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Check the image-classification head, including greyscale input."""
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) for the common model tests."""
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-behaviour test suite for the TF EfficientFormer model family.

    NOTE(review): this block was machine-mangled — every attribute was named
    `_lowerCAmelCase` (so only the last assignment survived), every method was
    named `lowerCAmelCase__` (so unittest discovered nothing), and defs with
    duplicate `_lowercase` parameters were SyntaxErrors. Names were restored
    from the reads still present in the code and from the upstream test-suite
    conventions — confirm against the canonical file. `TFModelTesterMixin`,
    `PipelineTesterMixin` and `EfficientFormerConfig` are presumed imported at
    the (out-of-view) top of this file — verify.
    """

    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_main_input_name = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        """Instantiate the shared model tester and the config tester."""
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        """The first positional argument of `model.call` must be `pixel_values`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        """Hidden states must be emitted with the expected count and shapes."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                # Fixed: the original had a typo, `self.asseretIsInstance`, which
                # would raise AttributeError whenever this branch ran.
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input preparation; the teacher model takes no labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Attention maps must be emitted with the expected count and shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        """Models must build as Keras functional graphs with flexible input shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests below.

    NOTE(review): the function name is restored from the `prepare_img()` calls
    in the integration tests; the original returned a never-assigned `image`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests against the released EfficientFormer-L1 checkpoint.

    NOTE(review): the cached-property name is restored from the
    `self.default_image_processor` reads; test-method names follow the upstream
    convention — confirm against the canonical file. The mangled original
    assigned every local to `__a`, leaving `model`, `outputs`, etc. unbound.
    """

    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 63 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
# Every log call in this module reads `logger`; the original bound only
# `lowercase__`, which made each `logger.info(...)` a NameError. Keep the old
# alias bound as well for any out-of-view code that referenced it.
lowercase__ = logger = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( RagRetriever ):
    """A RAG retriever distributed across workers with ``torch.distributed``.

    The main (rank-0) worker holds the index; other workers gather their query
    vectors to it and receive the retrieval results back via scatter.

    NOTE(review): this block was name-mangled (duplicate `_lowercase` parameters
    are a SyntaxError; `os.environ`/`self.process_group` assignment targets were
    lost; `torch.floataa`/`torch.intaa` are digit-mangled dtypes). Names are
    restored from the surviving reads (`self._is_main`, `self._scattered`,
    `self._infer_socket_ifname`, `distributed_port + 1`, ...) and the upstream
    implementation — confirm against the canonical file.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,  # deferred until init_retrieval() is called explicitly
        )
        self.process_group = None

    def init_retrieval(self, distributed_port):
        """Initialise the retrieval process group and, on the main worker, the index."""
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        """True on the rank-0 worker of the retrieval process group."""
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        """Receive this worker's slice of a rank-0 scatter into a fresh tensor."""
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve `n_docs` documents per query, coordinating across workers.

        Returns ``(retrieved_doc_embeds, doc_ids, doc_dicts)``.
        """
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 711 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution of ``loc + scale * x`` for ``x`` drawn from a base distribution.

    NOTE(review): class name restored from the ``AffineTransformed(...)`` call
    later in this module; base class restored to the imported
    ``TransformedDistribution``. The mangled original never set ``self.loc`` /
    ``self.scale`` and had duplicate parameter names (a SyntaxError).
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        # Identity transform by default.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance: scaling by `scale` multiplies the variance by `scale**2`."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project features to one tensor per distribution argument, then constrain them.

    NOTE(review): class name restored from the ``ParameterProjection(...)`` call
    later in this module; the forward method name follows the ``nn.Module``
    convention — confirm against the canonical file. The mangled original never
    set ``self.args_dim`` / ``self.proj`` / ``self.domain_map``.
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution argument, sized by args_dim values.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        # Maps the unconstrained projections into each argument's valid domain.
        self.domain_map = domain_map

    def forward(self, x):
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module``.

    NOTE(review): class name restored from the ``LambdaLayer(...)`` call later
    in this module; the mangled original never set ``self.function``.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class describing how network outputs parameterise a distribution.

    Subclasses set ``args_dim`` (per-argument sizes), ``distribution_class``
    and implement ``domain_map``.

    NOTE(review): the three ``_lowerCAmelCase = 42`` lines were mangled class
    annotations; names here are restored from the reads in this class
    (``self._base_distribution``, ``self.event_dim``, ``self.domain_map``,
    ``cls.squareplus`` in the subclasses) and the upstream implementation —
    confirm the class name against the canonical file.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim=1):
        self.dim = dim
        # Scale every argument's size by the number of output dimensions.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            # Treat the last `dim` axis as a single multivariate event.
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Build the (optionally affinely rescaled) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self):
        """Shape of each individual event contemplated by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self):
        """Number of event dimensions (length of `event_shape`)."""
        return len(self.event_shape)

    @property
    def value_in_support(self):
        """A value in the distribution's support, used to pad invalid positions."""
        return 0.0

    def get_parameter_projection(self, in_features):
        """Return the projection layer mapping features to distribution args."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args):
        """Constrain raw projections to each argument's valid domain (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x):
        # Smooth positivity transform: (x + sqrt(x^2 + 4)) / 2, an
        # alternative to softplus that avoids saturating gradients.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output head.

    NOTE(review): class and base names restored from the upstream
    implementation (the mangled base `__snake_case` referred to the
    distribution-output base class defined above) — confirm.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # `scale` must be strictly positive; clamp by the dtype's eps.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        # Shift `df` above 2 so the variance is finite.
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal (Gaussian) distribution output head.

    NOTE(review): class and base names restored from the upstream
    implementation — confirm.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # `scale` must be strictly positive; clamp by the dtype's eps.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output head.

    NOTE(review): class and base names restored from the upstream
    implementation; the mangled original assigned every local to `__a`, so the
    tuple unpacking and keyword arguments below are reconstructed — confirm.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        # `total_count` must be positive; `logits` is unconstrained.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 63 | 0 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit):
    """Return all primes strictly below `limit` via an odd-only sieve.

    NOTE(review): both function names were mangled to `__magic_name__`
    (the second def shadowed the first); they are restored from the
    `prime_sieve(...)` and `solution()` call sites in this file.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Mark odd multiples of each odd i up to sqrt(limit) as composite.
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling=1_000_000):
    """Project Euler 50: the prime below `ceiling` that is the sum of the
    longest run of consecutive primes.

    >>> solution(1000)
    953
    """
    primes = prime_sieve(ceiling)
    # O(1) membership instead of an O(n) list scan inside the double loop.
    prime_set = set(primes)
    length = 0
    largest = 0

    for i in range(len(primes)):
        # Only consider runs at least as long as the best found so far.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in prime_set:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f'{solution() = }')
| 712 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
# Make RNG behaviour deterministic so the hard-coded expected slices in the
# tests below are reproducible across runs.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for the Kandinsky V2.2 prior pipeline with tiny dummy models.

    NOTE(review): this block was name-mangled (all attributes `_lowerCAmelCase`,
    all methods `lowerCAmelCase__`, duplicate `_lowercase` parameters are a
    SyntaxError). The base class is restored from the `PipelineTesterMixin`
    import above; property names from the `get_dummy_components` reads; the
    mixin attribute and test-method names from the upstream conventions —
    confirm against the canonical file.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        """Assemble the tiny dummy components the pipeline needs."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 63 | 0 |
"""Roll out a value-guided diffusion policy in a D4RL hopper environment.

NOTE(review): this script was name-mangled — every assignment targeted
`lowercase__` while the reads kept the original names (`pipeline`, `obs`,
`denorm_actions`, `T`, ...), and `import darl` is the digit-mangled `d4rl`
(same pattern as `floataa`/`intaa` elsewhere in this repo). Names restored
from those reads — confirm against the canonical script.
"""
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f'Total reward: {total_reward}')
| 713 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the LED tokenizers (slow and fast).

    NOTE(review): this block was name-mangled (attributes `_lowerCAmelCase`,
    methods `lowerCAmelCase__` that shadowed each other, duplicate
    `_lowercase` parameters are a SyntaxError). Base class restored from the
    `TokenizerTesterMixin` import above; attribute names from the reads still
    present (`self.special_tokens_map`, `self.vocab_file`, `self.merges_file`,
    `self.default_tokenizer`); test-method names from the upstream
    conventions — confirm against the canonical file.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Minimal BPE vocab/merges written to disk for the from_pretrained tests.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 63 | 0 |
"""Enable ``rich``'s pretty tracebacks when the optional dependency is installed."""
from .imports import is_rich_available
# Fail fast with an actionable message if the optional `rich` extra is missing.
if is_rich_available():
    from rich.traceback import install
    # show_locals=False keeps tracebacks compact (and avoids leaking values).
    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI for converting an original ControlNet checkpoint into the diffusers
    # format via `download_controlnet_from_original_ckpt`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        """Parse the literal strings "True"/"False" into booleans (argparse `type=`)."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'''could not parse string as bool {string}''' )

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class SCREAMING_SNAKE_CASE__ :
    """Pure-Python SHA-256 (FIPS 180-4) over a ``bytes`` message.

    The hexadecimal digest is available as ``self.hash`` immediately after
    construction, e.g. ``SCREAMING_SNAKE_CASE__(b"abc").hash``.
    """

    def __init__(self, data):
        # data: the raw message bytes to hash.
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes (FIPS 180-4, section 5.3.3).
        self.hashes = [
            0x6a09e667,
            0xbb67ae85,
            0x3c6ef372,
            0xa54ff53a,
            0x510e527f,
            0x9b05688c,
            0x1f83d9ab,
            0x5be0cd19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes (FIPS 180-4, section 4.2.2).
        self.round_constants = [
            0x428a2f98,
            0x71374491,
            0xb5c0fbcf,
            0xe9b5dba5,
            0x3956c25b,
            0x59f111f1,
            0x923f82a4,
            0xab1c5ed5,
            0xd807aa98,
            0x12835b01,
            0x243185be,
            0x550c7dc3,
            0x72be5d74,
            0x80deb1fe,
            0x9bdc06a7,
            0xc19bf174,
            0xe49b69c1,
            0xefbe4786,
            0x0fc19dc6,
            0x240ca1cc,
            0x2de92c6f,
            0x4a7484aa,
            0x5cb0a9dc,
            0x76f988da,
            0x983e5152,
            0xa831c66d,
            0xb00327c8,
            0xbf597fc7,
            0xc6e00bf3,
            0xd5a79147,
            0x06ca6351,
            0x14292967,
            0x27b70a85,
            0x2e1b2138,
            0x4d2c6dfc,
            0x53380d13,
            0x650a7354,
            0x766a0abb,
            0x81c2c92e,
            0x92722c85,
            0xa2bfe8a1,
            0xa81a664b,
            0xc24b8b70,
            0xc76c51a3,
            0xd192e819,
            0xd6990624,
            0xf40e3585,
            0x106aa070,
            0x19a4c116,
            0x1e376c08,
            0x2748774c,
            0x34b0bcb5,
            0x391c0cb3,
            0x4ed8aa4a,
            0x5b9cca4f,
            0x682e6ff3,
            0x748f82ee,
            0x78a5636f,
            0x84c87814,
            0x8cc70208,
            0x90befffa,
            0xa4506ceb,
            0xbef9a3f7,
            0xc67178f2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data):
        """Pad the message to a multiple of 64 bytes (FIPS 180-4, section 5.1.1):
        a 0x80 byte, zero padding, then the bit length as a big-endian uint64."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self):
        """Run the SHA-256 compression function over every 64-byte block and
        store the hex digest in ``self.hash``."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers.
            words = list(struct.unpack(">16L", block))
            # Add 48 zeroed integers for the message schedule.
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # Expand the message schedule (sigma0 / sigma1).
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression round.
                big_s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xffffffff) & g)
                temp1 = (
                    h + big_s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                big_s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (big_s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Fold this block's result back into the running hash state.
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        """Right-rotate a 32-bit value by ``rotations`` bits."""
        return 0xffffffff & (value << (32 - rotations)) | (value >> rotations)


# Alias under the name used by the test class and the CLI below.
SHAaaa = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def lowerCAmelCase__(self ):
        """Check the pure-Python SHA-256 against hashlib's C implementation."""
        import hashlib

        data = bytes("""Test String""" , """utf-8""" )
        # SHAaaa is the SHA-256 class defined earlier in this file; the
        # original code referenced an undefined `_UpperCamelCase` and the
        # nonexistent `hashlib.shaaaa`.
        self.assertEqual(SHAaaa(data).hash , hashlib.sha256(data).hexdigest() )
def __magic_name__ ( ):
    """CLI entry point: hash a literal string (-s) or a file's contents (-f)
    with the pure-Python SHA-256 above, after running the module doctests."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # A file argument, when given, takes precedence; hash input must be bytes.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    __magic_name__()
| 715 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Minimal one-step diffusion pipeline (test fixture): denoises random
    noise for a single timestep and returns an all-ones tensor of the same
    shape, making the output deterministic in shape and value."""

    def __init__(self, unet, scheduler):
        # NOTE(review): the original signature declared the same parameter
        # name twice (`_lowercase, _lowercase`) -- a SyntaxError; restored to
        # the names used by `register_modules`.
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output cancels to zero; adding ones
        # yields a deterministic, shape-preserving result for tests.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 63 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

# Model-type -> Flax model class name tables consumed by the _LazyAutoMapping
# instances below. The original file assigned every table to the same
# obfuscated name `lowercase__`, leaving all the names referenced further
# down undefined; the canonical names are restored from those call sites.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy config-class -> model-class mappings built from the tables above.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


# Public auto-classes. Each class name below is the one referenced by its own
# `auto_class_update` call in the original file (they were all defined under
# the obfuscated name `SCREAMING_SNAKE_CASE__`, leaving those references
# undefined). `_model_mapping` is the attribute `_BaseAutoModelClass` reads.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 716 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for ViT-MSN.
# NOTE(review): also assigned to the obfuscated name `lowercase__`, clobbering
# the logger above; downstream code likely expects a name such as
# `VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP` -- confirm against the original file.
lowercase__ = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration class for a ViT-MSN model.

    Stores the transformer hyper-parameters (hidden size, depth, attention
    heads, ...) and the vision-specific settings (image/patch size, channels).
    The original `__init__` declared the parameter name `_lowercase` thirteen
    times -- a SyntaxError; parameter names are restored from the attribute
    assignments in the body.
    """

    _lowerCAmelCase = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 0 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] ):
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd-Warshall in O(v^3).

    graph: v x v adjacency matrix with float("inf") for missing edges.
    Prints the resulting matrix and returns ``(dist, v)``.
    NOTE(review): originally named `__magic_name__` while the module-level
    driver calls `floyd_warshall`; the called name is restored.
    """
    dist = [[float("""inf""" ) for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("""inf""" )
                    and dist[k][j] != float("""inf""" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read a weighted digraph edge by edge, then run
    # Floyd-Warshall. The original collapsed every variable into the single
    # obfuscated name `lowercase__`, leaving v/e/graph/weight undefined.
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 717 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

# Pretrained resource tables for the three DPR tokenizers (context encoder,
# question encoder, reader). The original file assigned each table to the
# obfuscated name `lowercase__`, leaving the names referenced by the
# tokenizer classes below undefined; the canonical names are restored from
# those class attributes.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positions) per checkpoint.
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

# Default tokenizer init kwargs per checkpoint.
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Fast (Rust-backed) tokenizer for the DPR *context encoder*: identical to
    # its Bert-based parent except for the pretrained resource tables below.
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    # Companion slow tokenizer class used when converting between backends.
    _lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Fast (Rust-backed) tokenizer for the DPR *question encoder*: identical to
    # its Bert-based parent except for the pretrained resource tables below.
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    # Companion slow tokenizer class used when converting between backends.
    _lowerCAmelCase = DPRQuestionEncoderTokenizer
# Output records for the DPR reader. The original file assigned both
# namedtuples to the obfuscated name `lowercase__` (the second clobbering the
# first) while the reader code below references them by these names.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ :
def __call__(
    self,
    questions,
    titles=None,
    texts=None,
    padding=False,
    truncation=False,
    max_length=None,
    return_tensors=None,
    return_attention_mask=None,
    **kwargs,
) -> BatchEncoding:
    """Encode question/title/text triplets for the DPR reader as
    ``[CLS] question [SEP] title [SEP] text`` sequences.

    NOTE(review): the original signature declared the parameter name
    `_lowercase` seven times -- a SyntaxError; parameter names are restored
    from how they are consumed in the body.
    """
    # No passages: behave exactly like the parent tokenizer on the questions.
    if titles is None and texts is None:
        return super().__call__(
            questions,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_tensors=return_tensors,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
    # Only one of titles/texts: encode questions paired with whichever exists.
    elif titles is None or texts is None:
        text_pair = titles if texts is None else texts
        return super().__call__(
            questions,
            text_pair,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_tensors=return_tensors,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
    titles = titles if not isinstance(titles, str) else [titles]
    texts = texts if not isinstance(texts, str) else [texts]
    n_passages = len(titles)
    # A single question is duplicated across all passages.
    questions = questions if not isinstance(questions, str) else [questions] * n_passages
    assert len(titles) == len(
        texts
    ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
    encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
    # Texts get no special tokens: they are appended after the [SEP] of the
    # question+title encoding.
    encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
    encoded_inputs = {
        "input_ids": [
            (encoded_question_and_title + encoded_text)[:max_length]
            if max_length is not None and truncation
            else encoded_question_and_title + encoded_text
            for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
        ]
    }
    if return_attention_mask is not False:
        attention_mask = []
        for input_ids in encoded_inputs["input_ids"]:
            # Attend to every non-padding token.
            attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
        encoded_inputs["attention_mask"] = attention_mask
    return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase = 16 , _lowercase = 64 , _lowercase = 4 , ):
'''simple docstring'''
__a : Union[str, Any] = reader_input["""input_ids"""]
__a , __a , __a : Optional[int] = reader_output[:3]
__a : int = len(_lowercase )
__a : Any = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
__a : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__a : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__a : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__a : int = sequence_ids.index(self.pad_token_id )
else:
__a : Optional[Any] = len(_lowercase )
__a : List[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , ):
'''simple docstring'''
__a : Tuple = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__a : str = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
__a : Union[str, Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
__a : List[str] = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case ):
    """Fast DPR reader tokenizer: binds vocab/config constants to the base classes.

    NOTE(review): every class attribute below is assigned to the same obfuscated
    name ``_lowerCAmelCase``, so each assignment overwrites the previous one --
    the original attribute names (vocab_files_names, pretrained_vocab_files_map,
    max_model_input_sizes, pretrained_init_configuration, model_input_names,
    slow_tokenizer_class) appear to have been lost; confirm upstream.
    """
    # Right-hand sides show the intended constants/class wiring.
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    _lowerCAmelCase = DPRReaderTokenizer
| 63 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Unit tests for ``DDPMParallelScheduler``, driven by SchedulerCommonTest helpers.

    NOTE(review): throughout this class local assignments were rewritten to the
    placeholder name ``__a`` while later statements still reference the original
    names (``config``, ``scheduler_class``, ``scheduler``, ``model`` ...), so
    most methods raise NameError as written -- the original local names appear
    to have been lost; confirm against the upstream diffusers test suite.
    """
    # The scheduler class(es) this suite exercises.
    _lowerCAmelCase = (DDPMParallelScheduler,)
    def lowerCAmelCase__(self , **_lowercase ):
        '''Build the default scheduler config dict; keyword args override defaults.'''
        __a : Optional[Any] = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**lowerCamelCase_ )
        return config
    def lowerCAmelCase__(self ):
        '''Construction should succeed for a range of training-timestep counts.'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Construction should succeed for several (beta_start, beta_end) pairs.'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Construction should succeed for each supported beta schedule.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Construction should succeed for each variance type.'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Construction should succeed with sample clipping on and off.'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Thresholding: off, then every threshold x prediction-type combination.'''
        self.check_over_configs(thresholding=lowerCamelCase_ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , )
    def lowerCAmelCase__(self ):
        '''Construction should succeed for each prediction type.'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Forward step should work at the first, middle and last timestep.'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''_get_variance should match known reference values at t = 0 / 487 / 999.'''
        __a : List[Any] = self.scheduler_classes[0]
        __a : Union[str, Any] = self.get_scheduler_config()
        __a : Tuple = scheduler_class(**lowerCamelCase_ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
    def lowerCAmelCase__(self ):
        '''batch_step_no_noise over three stacked samples matches reference sums.'''
        __a : Union[str, Any] = self.scheduler_classes[0]
        __a : List[Any] = self.get_scheduler_config()
        __a : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
        __a : Any = len(lowerCamelCase_ )
        __a : Optional[int] = self.dummy_model()
        __a : Dict = self.dummy_sample_deter
        __a : Dict = self.dummy_sample_deter + 0.1
        __a : int = self.dummy_sample_deter - 0.1
        __a : List[str] = samplea.shape[0]
        __a : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
        __a : List[str] = torch.arange(lowerCamelCase_ )[0:3, None].repeat(1 , lowerCamelCase_ )
        __a : Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        __a : List[str] = scheduler.batch_step_no_noise(lowerCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        __a : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
        __a : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
        assert abs(result_sum.item() - 1153.1833 ) < 1e-2
        assert abs(result_mean.item() - 0.5005 ) < 1e-3
    def lowerCAmelCase__(self ):
        '''Full reverse-diffusion loop (epsilon prediction) matches reference sums.'''
        __a : Optional[int] = self.scheduler_classes[0]
        __a : List[Any] = self.get_scheduler_config()
        __a : Dict = scheduler_class(**lowerCamelCase_ )
        __a : List[str] = len(lowerCamelCase_ )
        __a : Dict = self.dummy_model()
        __a : Union[str, Any] = self.dummy_sample_deter
        __a : Union[str, Any] = torch.manual_seed(0 )
        for t in reversed(range(lowerCamelCase_ ) ):
            # 1. predict noise residual
            __a : Tuple = model(lowerCamelCase_ , lowerCamelCase_ )
            # 2. predict previous mean of sample x_t-1
            __a : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
            __a : Tuple = pred_prev_sample
        __a : Dict = torch.sum(torch.abs(lowerCamelCase_ ) )
        __a : str = torch.mean(torch.abs(lowerCamelCase_ ) )
        assert abs(result_sum.item() - 258.9606 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3
    def lowerCAmelCase__(self ):
        '''Full reverse-diffusion loop with v-prediction matches reference sums.'''
        __a : Optional[int] = self.scheduler_classes[0]
        __a : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
        __a : Any = scheduler_class(**lowerCamelCase_ )
        __a : List[Any] = len(lowerCamelCase_ )
        __a : Dict = self.dummy_model()
        __a : Optional[int] = self.dummy_sample_deter
        __a : List[str] = torch.manual_seed(0 )
        for t in reversed(range(lowerCamelCase_ ) ):
            # 1. predict noise residual
            __a : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
            # 2. predict previous mean of sample x_t-1
            __a : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
            __a : Optional[int] = pred_prev_sample
        __a : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
        __a : Dict = torch.mean(torch.abs(lowerCamelCase_ ) )
        assert abs(result_sum.item() - 202.0296 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3
    def lowerCAmelCase__(self ):
        '''With custom timesteps, previous_timestep should walk down the list (ending at -1).'''
        __a : Optional[Any] = self.scheduler_classes[0]
        __a : Optional[Any] = self.get_scheduler_config()
        __a : Optional[int] = scheduler_class(**lowerCamelCase_ )
        __a : Any = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=lowerCamelCase_ )
        __a : List[Any] = scheduler.timesteps
        for i, timestep in enumerate(lowerCamelCase_ ):
            if i == len(lowerCamelCase_ ) - 1:
                __a : List[str] = -1
            else:
                __a : int = timesteps[i + 1]
            __a : List[Any] = scheduler.previous_timestep(lowerCamelCase_ )
            __a : Optional[Any] = prev_t.item()
            self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Non-descending custom timesteps must be rejected.'''
        __a : List[Any] = self.scheduler_classes[0]
        __a : Tuple = self.get_scheduler_config()
        __a : Optional[Any] = scheduler_class(**lowerCamelCase_ )
        __a : Optional[Any] = [100, 87, 50, 51, 0]
        with self.assertRaises(lowerCamelCase_ , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Passing both num_inference_steps and custom timesteps must be rejected.'''
        __a : Optional[Any] = self.scheduler_classes[0]
        __a : Optional[Any] = self.get_scheduler_config()
        __a : int = scheduler_class(**lowerCamelCase_ )
        __a : Tuple = [100, 87, 50, 1, 0]
        __a : Optional[int] = len(lowerCamelCase_ )
        with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
    def lowerCAmelCase__(self ):
        '''Custom timesteps at/above num_train_timesteps must be rejected.'''
        __a : int = self.scheduler_classes[0]
        __a : Any = self.get_scheduler_config()
        __a : List[Any] = scheduler_class(**lowerCamelCase_ )
        __a : List[Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 718 |
"""simple docstring"""
import os
def largest_product(grid):
    """Return the greatest product of four adjacent numbers in *grid*
    (vertical, horizontal, or either diagonal).

    Fixes: both functions in this script were named ``__magic_name__`` (the
    second shadowed the first) and locals were obfuscated away, leaving the
    body referencing undefined names; real names are restored. Note the
    combined scan only works for square (n x n) grids, as the original
    comment states.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # Diagonal products start at 0 and are only recomputed when in range,
    # so a stale value can carry over between iterations (original behavior).
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    """Read the grid from ``grid.txt`` next to this file and return the answer.

    Fix: the original passed an undefined name to ``os.path.dirname``; it must
    be this module's ``__file__``.
    """
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 63 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place).

    Fix: the original called ``state_dict.pop(_A, _A)`` with an undefined
    name; each key is popped with a ``None`` default so missing keys are
    tolerated.
    """
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing its weight with *emb* (LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint from *checkpoint_path* and return the
    equivalent Hugging Face model.

    Fixes vs. the original: all three functions were named ``__magic_name__``
    (each shadowing the previous) and locals were obfuscated away; real names
    are restored so the script actually runs.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="""cpu""")
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1_0_2_4,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="""relu""",
    )
    # The decoder embedding doubles as the shared embedding table.
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    # Tie the LM head to the shared embeddings.
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Fix: the original read ``args.fairseq_pathß`` (stray mojibake character).
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 719 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Output container for the semantic Stable Diffusion pipeline.

    NOTE(review): both fields are bound to the same obfuscated name
    ``_lowerCAmelCase`` with the literal ``42`` standing in for what was
    presumably a type annotation, so the second assignment overwrites the
    first -- the original field names appear to have been lost; confirm
    against the upstream pipeline output dataclass.
    """
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
# Import the concrete pipeline only when its heavy dependencies are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node:
    """A single node of a binary search tree.

    Fix: the original class was renamed to the obfuscated ``SCREAMING_SNAKE_CASE__``
    (clashing with the tree class below) and its attribute assignments were
    collapsed onto the placeholder ``__a``; the names used by ``__repr__`` and
    the tree class (``value``, ``parent``, ``left``, ``right``) are restored.
    """

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({F'''{self.value}''': (self.left, self.right)}, indent=1)


class BinarySearchTree:
    """An unbalanced binary search tree supporting insert/search/remove and
    traversals. Restored from the obfuscated original, whose locals and
    method names were lost (internal calls such as ``self.__insert``,
    ``self.search`` and ``Node(...)`` still named the originals).
    """

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        """Render the whole tree via Node.__repr__."""
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        """Splice *new_children* into *node*'s position in the tree."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """Return True if *node* is the right child of its parent."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        """Return True if the tree has no nodes."""
        return self.root is None

    def __insert(self, value):
        """Insert a single *value* as a new leaf, walking from the root."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        """Insert any number of values."""
        for value in values:
            self.__insert(value)

    def search(self, value):
        """Return the node holding *value*, or None if absent.

        Raises IndexError on an empty tree (original behavior).
        """
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            # Fix: the original compared with ``is not`` (identity), which is
            # only reliable for interned objects; use value inequality.
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Go deep on the right branch; return the maximum node (or None)."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """Go deep on the left branch; return the minimum node (or None)."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        """Remove the node labelled *value*, preserving the BST invariant."""
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)
                node.value = (
                    tmp_node.value
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        """Yield nodes in preorder starting at *node*."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Traverse the tree; defaults to preorder when no function is given."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        """Append the subtree's values to *arr* in sorted (inorder) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) in the subtree at *node*."""
        arr = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    """Return the subtree's nodes in postorder as a list."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree():
    """Small interactive demo of the tree operations."""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)
        print("Min Value: ", t.get_min().value)
    for i in testlist:
        t.remove(i)
    print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 720 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# (Layout models take images/bounding boxes, so they are skipped below.)
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Pipeline tests for the text-classification task (PyTorch and TensorFlow).

    NOTE(review): local assignments throughout this class were rewritten to the
    placeholder name ``__a`` while later statements reference the original
    names (``text_classifier``, ``outputs``, ``model``, ``N`` ...), so the
    methods raise NameError as written -- the original local names appear to
    have been lost; confirm against the upstream transformers test suite.
    """
    _lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    _lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Drop configs whose models require layout (non-text) inputs.
    if model_mapping is not None:
        _lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        _lowerCAmelCase = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def lowerCAmelCase__(self ):
        '''Tiny PT model: single input, top_k variants, batching, and the legacy return_all_scores flag.'''
        __a : int = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
        __a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : List[str] = text_classifier("""This is great !""" , top_k=1 )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        # Legacy behavior
        __a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
        __a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ] , )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Tiny PT model pinned explicitly to the CPU device still yields the expected score.'''
        import torch
        __a : Any = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
        __a : Optional[int] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
    @require_tf
    def lowerCAmelCase__(self ):
        '''Tiny TF model yields the expected score.'''
        __a : List[Any] = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
        __a : List[str] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
    @slow
    @require_torch
    def lowerCAmelCase__(self ):
        '''Default PT model: sentiment predictions on three reference prompts.'''
        __a : Tuple = pipeline("""text-classification""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Optional[int] = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    @slow
    @require_tf
    def lowerCAmelCase__(self ):
        '''Default TF model: sentiment predictions on three reference prompts.'''
        __a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
        __a : str = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Tuple = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : str = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Build a pipeline instance plus example inputs for the common pipeline harness.'''
        __a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''Exercise single/batch calls, top_k=None, dict text-pairs, and rejected pair formats.'''
        __a : List[str] = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        __a : Union[str, Any] = """HuggingFace is in"""
        __a : List[str] = text_classifier(_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        __a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
        __a : Dict = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        __a : Dict = text_classifier(_lowercase , top_k=_lowercase )
        __a : Dict = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
        __a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        __a : Any = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
        self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        __a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(_lowercase ):
            text_classifier(_lowercase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        __a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Config holder that fabricates inputs for the LayoutLMv3 image-processor tests.

    NOTE(review): the constructor declares every parameter as the duplicate
    name ``_lowercase`` (a SyntaxError as written) and binds attributes via the
    placeholder ``__a`` -- the intended parameter/attribute names (parent,
    batch_size, num_channels, image_size, min_resolution, max_resolution,
    do_resize, size, apply_ocr) are only visible on the right-hand sides;
    confirm against the upstream test suite.
    """
    def __init__(self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=18 , _lowercase=30 , _lowercase=400 , _lowercase=True , _lowercase=None , _lowercase=True , ):
        '''Record the image-generation settings used by the tests below.'''
        __a : Optional[int] = size if size is not None else {"""height""": 18, """width""": 18}
        __a : str = parent
        __a : List[str] = batch_size
        __a : str = num_channels
        __a : Dict = image_size
        __a : Dict = min_resolution
        __a : str = max_resolution
        __a : Dict = do_resize
        __a : List[str] = size
        __a : Union[str, Any] = apply_ocr
    def lowerCAmelCase__(self ):
        '''Return the kwargs dict handed to the image processor under test.'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
_lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
self.assertTrue(hasattr(_lowercase , """apply_ocr""" ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , _lowercase )
self.assertIsInstance(encoding.boxes , _lowercase )
# Test batched
__a : Union[str, Any] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
__a : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a : Union[str, Any] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
__a : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a : Dict = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : Dict = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__a : Any = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
__a : List[str] = image_processing(_lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
"""consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__a : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 
447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase )
self.assertListEqual(encoding.boxes , _lowercase )
# with apply_OCR = False
__a : int = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
__a : int = image_processing(_lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 721 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Unit tests for the 0/1 knapsack solver exposed as `k.knapsack`.

    NOTE(review): restored from an obfuscated original in which every local was the
    undefined name `_lowercase` (NameError) and all three methods shared the name
    `lowerCAmelCase__`, so later definitions clobbered earlier ones and unittest
    (which discovers only `test*` methods) never ran any of them.
    """

    def test_base_case(self):
        """Zero capacity — the achievable value is always 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: best choice is the two lightest items (2 + 3 = 5)."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic textbook instance with optimum value 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| 63 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for a SEW-D (Squeezed and Efficient Wav2Vec with Disentangled
    attention) speech model; defaults mirror the asapp/sew-d-tiny-100k checkpoint.

    NOTE(review): restored from an obfuscated original in which the base class was
    the undefined name `_lowercase` (it is `PretrainedConfig`, imported at the top of
    this file), every `__init__` parameter shared the name `_lowercase` (a
    SyntaxError), and every `self.<attr>` assignment was replaced by a throwaway
    local even though `self.conv_dim` / `self.conv_stride` etc. are read below.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv-layer descriptions must be parallel lists.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Overall time-axis downsampling factor of the feature encoder (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 700 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Manim scene: animates how checkpoint weights are staged through CPU/GPU
    # memory and offloaded to disk (an accelerate-style big-model-inference diagram).
    # NOTE(review): many call arguments below are the undefined name `_lowercase`
    # (presumably direction/color constants such as RIGHT/DOWN/YELLOW lost in a
    # mechanical rename), and several names (`cpu`, `gpu`, `model`, `checkpoint`,
    # `key`, `disk`, `target`, ...) are read but never bound because assignments
    # go to the throwaway `__a`. As written this method raises NameError at
    # runtime; code is left byte-identical because a faithful reconstruction
    # could not be verified from this file alone.
    def lowerCAmelCase__(self ):
        '''Build and animate the CPU/GPU/model/checkpoint/disk memory diagram.'''
        # Memory-cell glyphs: full-size cell, quarter-size "meta" cell, and a
        # stroke-less fill rectangle.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two 6-cell columns plus a label.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU block: 4 cells plus a label.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model block: 6 cells plus a label.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Overlay translucent weight fills on the model cells, anchoring targets
        # to positions in the CPU columns.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # Loaded-checkpoint block: 6 cells plus a label.
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        # Checkpoint fills mirrored into the two CPU columns (5 left + rest right).
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend (key) square with colored bullet captions, top-left of the frame.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Caption for the disk-offload step, then the disk block (meta-size cells).
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        # Animate each checkpoint cell shrinking onto the disk column.
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Final caption: checkpoint memory is reclaimed by garbage collection.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowercase__ = None
lowercase__ = logging.get_logger(__name__)
# File/URL maps for the T5 fast tokenizer. The obfuscated original bound all
# three dicts to the same name `lowercase__` (each assignment clobbering the
# previous one) even though the tokenizer class below reads them under the
# names restored here.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class SCREAMING_SNAKE_CASE__(PreTrainedTokenizerFast):
    """Fast T5 tokenizer (backed by HuggingFace *tokenizers*), Unigram-based.

    NOTE(review): restored from an obfuscated original in which the base class was
    bound to a dict, every `__init__` parameter shared one name (a SyntaxError),
    all methods were named `lowerCAmelCase__` (later defs clobbered earlier ones),
    and instance attributes read below (`vocab_file`, `can_save_slow_tokenizer`,
    `prefix_tokens`) were never set.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Register the <extra_id_N> sentinel tokens as additional special tokens.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow vocabulary requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Work around historically wrong max lengths baked into the canonical t5-* checkpoints."""
        if pretrained_model_name_or_path in SCREAMING_SNAKE_CASE__.max_model_input_sizes:
            deprecated_max_model_length = SCREAMING_SNAKE_CASE__.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model file into `save_directory`; return the path tuple."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append `</s>` to each sequence — T5's only special-token pattern."""
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """T5 does not use token type ids: return a zero list of the right length."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        """All `<extra_id_N>` sentinel tokens among the additional special tokens."""
        return list(
            set(
                filter(
                    lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None,
                    self.additional_special_tokens,
                )
            )
        )

    def get_sentinel_token_ids(self):
        """Vocabulary ids of the `<extra_id_N>` sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 701 |
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: (moles / volume in litres) * n-factor.

    Restored from an obfuscated original whose parameters were all named
    `_lowerCamelCase` (a SyntaxError) while the body used the names below.

    >>> molarity_to_normality(2, 3.1, 0.31)
    20
    """
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal-gas pressure (atm) from volume (L), moles and temperature (K).

    Uses PV = nRT with R = 0.0821 L·atm/(mol·K); the result is rounded.

    >>> moles_to_pressure(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal-gas volume (L) from pressure (atm), moles and temperature (K).

    Uses PV = nRT with R = 0.0821 L·atm/(mol·K); the result is rounded.

    >>> moles_to_volume(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal-gas temperature (K) from pressure (atm), moles and volume (L).

    Uses PV = nRT with R = 0.0821 L·atm/(mol·K); the result is rounded.

    >>> pressure_and_volume_to_temperature(0.82, 1, 2)
    20
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    # Run the docstring examples when executed as a script.
    import doctest
    doctest.testmod()
| 63 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module logger. The function bodies below refer to `logger`, but the
# obfuscated original bound it to `lowercase__`, which is immediately
# clobbered by the MAPPING dict — leaving `logger` undefined at call time.
logger = logging.get_logger(__name__)
# fairseq-to-HuggingFace parameter-name mapping ("*" is a layer-index
# placeholder) and the keys that live at the top level of the HF model rather
# than under the `unispeech.` prefix. The obfuscated original bound both to
# `lowercase__` although the loaders below read `MAPPING` and `TOP_LEVEL_KEYS`.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk `key` (a dotted attribute path) down from `hf_pointer` and copy `value`
    into the matching weight/bias tensor, verifying shapes first.

    Restored from an obfuscated original whose parameters were all named
    `_lowerCamelCase` (a SyntaxError); name and arity are grounded by the call in
    `recursively_load_weights` below.
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every tensor of a fairseq UniSpeech state dict onto `hf_model`.

    Conv-feature-extractor tensors go through `load_conv_layer`; everything else
    is matched against MAPPING. Unmatched tensors are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The segment just before the matched key is the layer index.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>.{weight,bias}":
    type 0 is the conv itself, type 2 its layer/group norm. Anything else is
    recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint to the HuggingFace format.

    Builds the config (from `config_path` or defaults), optionally materializes a
    CTC vocabulary + processor from the fairseq dictionary when fine-tuned, loads
    the fairseq model, copies its weights and saves everything to
    `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Build the CLI and run the conversion. The obfuscated original assigned the
    # parser to a throwaway name while still calling methods on the (then
    # undefined) name `parser`; restored coherent bindings.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 702 |
"""simple docstring"""
def __magic_name__ ( nums : list[int] ) -> float:
    """Return the mean absolute deviation of *nums*.

    The mean absolute deviation is the average distance of each element
    from the arithmetic mean of the list.

    Fixes the obfuscated dump: the parameter was renamed while the body
    still read ``nums``, and the mean was bound to a throwaway name while
    the next line read ``average`` (NameError at runtime).

    :param nums: non-empty list of numbers
    :return: the mean absolute deviation as a float
    :raises ValueError: if ``nums`` is empty

    >>> __magic_name__([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average ) for x in nums) / len(nums)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 63 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Shared SentencePiece fixture used by the tokenizer tests below.
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
# NOTE(review): these two ids are read later as EN_CODE (50003) and
# PYTHON_CODE (50002) — the dump renamed both assignments to `lowercase__`.
lowercase__ = 50003
lowercase__ = 50002
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for the slow ``PLBartTokenizer`` (base and multi language-code sets).
    NOTE(review): obfuscated dump — ``UpperCamelCase_`` stands for the imported
    ``TokenizerTesterMixin``; the three ``_lowerCAmelCase`` attributes were
    originally ``tokenizer_class`` / ``rust_tokenizer_class`` /
    ``test_rust_tokenizer``; ``__a`` / ``__A`` stand for locals and arguments
    whose intended names are visible from subsequent reads (e.g. ``tokenizer``,
    ``end``).  Runtime statements are kept byte-for-byte.
    """
    _lowerCAmelCase = PLBartTokenizer
    _lowerCAmelCase = None
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        '''Build a base-vocab tokenizer from the SentencePiece fixture and save it to the temp dir.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        __a : List[Any] = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase__(self ):
        '''Full round-trip of the *base* tokenizer: tokenize, ids, back to tokens, language codes, decode.'''
        __a : Tuple = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A )
        __a : Optional[int] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        __a : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __A , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        __a : Optional[int] = tokenizer.convert_tokens_to_ids(__A )
        self.assertListEqual(
            __A , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        __a : Union[str, Any] = tokenizer.convert_ids_to_tokens(__A )
        self.assertListEqual(
            __A , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
        # Language-code tokens and <mask> sit at the end of the base vocabulary.
        __a : int = tokenizer.vocab_size
        __a : Tuple = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 4 , __A )]
        self.assertListEqual(__A , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
        __a : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        __a : Dict = tokenizer(__A ).input_ids
        self.assertEqual(
            tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , )
    def lowerCAmelCase__(self ):
        '''Same round-trip as above but with the *multi* language-code set (7 codes, no <mask> at the end).'''
        __a : Dict = PLBartTokenizer(__A , language_codes="""multi""" , keep_accents=__A )
        __a : Optional[Any] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        __a : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __A , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        __a : Dict = tokenizer.convert_tokens_to_ids(__A )
        self.assertListEqual(
            __A , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        __a : Optional[Any] = tokenizer.convert_ids_to_tokens(__A )
        self.assertListEqual(
            __A , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
        # All seven multi-set language codes occupy the last vocab slots.
        __a : List[Any] = tokenizer.vocab_size
        __a : Union[str, Any] = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 7 , __A )]
        self.assertListEqual(
            __A , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
        __a : str = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        __a : List[Any] = tokenizer(__A ).input_ids
        self.assertEqual(
            tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Integration tests against the ``uclanlp/plbart-python-en_XX`` checkpoint.
    NOTE(review): obfuscated dump — the ``_lowerCAmelCase`` class attributes
    were originally ``checkpoint_name`` / ``src_text`` / ``tgt_text`` /
    ``expected_src_tokens``, and ``__a`` / ``__A`` stand for locals/arguments
    whose intended names are visible from subsequent reads.  Runtime
    statements are kept byte-for-byte.
    """
    _lowerCAmelCase = "uclanlp/plbart-python-en_XX"
    _lowerCAmelCase = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    _lowerCAmelCase = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # Expected ids for src_text[0]; EOS (2) is followed by the PYTHON_CODE language id.
    _lowerCAmelCase = [
        1_3_4,
        5_4_5_2,
        3_3_4_6_0,
        3_3_4_4_1,
        3_3_4_6_3,
        3_3_4_6_5,
        3_3_4_6_3,
        3_3_4_4_9,
        9_8_8,
        2_0,
        3_3_4_5_6,
        1_9,
        3_3_4_5_6,
        7_7_1,
        3_9,
        4_2_5_8,
        8_8_9,
        3_3_1_8,
        3_3_4_4_1,
        3_3_4_6_3,
        3_3_4_6_5,
        3_3_4_6_3,
        3_3_4_4_9,
        2_4_7_1,
        2,
        PYTHON_CODE,
    ]
    @classmethod
    def lowerCAmelCase__(cls ):
        '''Load the pretrained tokenizer once for the whole class (python -> en_XX).'''
        __a : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
        __a : Optional[Any] = 1
        return cls
    def lowerCAmelCase__(self ):
        '''Language-code tokens occupy fixed ids at the end of the base vocab.'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50003 )
    def lowerCAmelCase__(self ):
        '''Encoding src_text[0] must reproduce the recorded expected id sequence.'''
        __a : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __A )
    def lowerCAmelCase__(self ):
        '''Decoding with skip_special_tokens must drop the language code and EOS.'''
        self.assertIn(__A , self.tokenizer.all_special_ids )
        __a : List[str] = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        __a : List[str] = self.tokenizer.decode(__A , skip_special_tokens=__A )
        __a : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__A )
        self.assertEqual(__A , __A )
        self.assertNotIn(self.tokenizer.eos_token , __A )
    def lowerCAmelCase__(self ):
        '''Truncation keeps EOS + language code as the final two ids.'''
        __a : int = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0] , __A )
        __a : Dict = 10
        __a : List[Any] = self.tokenizer(__A , max_length=__A , truncation=__A ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , __A )
        self.assertEqual(len(__A ) , __A )
    def lowerCAmelCase__(self ):
        '''<mask> and language-code tokens map to their reserved ids.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50004, 50001] )
    def lowerCAmelCase__(self ):
        '''Saving then reloading must preserve the special-token id mapping.'''
        __a : str = tempfile.mkdtemp()
        __a : List[Any] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__A )
        __a : List[Any] = PLBartTokenizer.from_pretrained(__A )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __A )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Batched src/tgt encoding: EOS + code end the inputs; the code leads decoder inputs.'''
        __a : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__A , return_tensors="""pt""" )
        __a : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , __A )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Padded/truncated batch has the expected shapes and resets prefix/suffix tokens.'''
        __a : Optional[int] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__A , truncation=__A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        __a : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__A , __A )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        __a : Optional[int] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __A )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def lowerCAmelCase__(self ):
        '''Source and target may be truncated to different max lengths in one call.'''
        __a : List[Any] = self.tokenizer(self.src_text , padding=__A , truncation=__A , max_length=3 , return_tensors="""pt""" )
        __a : str = self.tokenizer(
            text_target=self.tgt_text , padding=__A , truncation=__A , max_length=10 , return_tensors="""pt""" )
        __a : int = targets["input_ids"]
        __a : Tuple = shift_tokens_right(__A , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Translation inputs end with the source code and force the target code as BOS.'''
        __a : str = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
        self.assertEqual(
            nested_simplify(__A ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[150, 242, 2, 50003]],
                """attention_mask""": [[1, 1, 1, 1]],
                # java
                """forced_bos_token_id""": 50001,
            } , )
| 703 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __magic_name__ ( img : np.ndarray , variance : float ) -> np.ndarray:
    """Apply a Gaussian of the given *variance* element-wise to ``img``.

    Fixes the obfuscated dump, whose signature declared the same parameter
    name twice (a SyntaxError) and whose assignments bound ``__a`` while the
    following lines read ``sigma`` and ``cons``.

    :param img: array of distances / intensity differences to weight
    :param variance: Gaussian variance; the standard deviation is ``sqrt(variance)``
    :return: array of Gaussian weights with the same shape as ``img``
    """
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def __magic_name__ ( img : np.ndarray , x : int , y : int , kernel_size : int ) -> np.ndarray:
    """Return the ``kernel_size`` x ``kernel_size`` window of *img* centred at (x, y).

    Fixes the obfuscated dump, whose signature declared ``_lowerCamelCase``
    four times while the body read ``img``, ``x``, ``y`` and ``kernel_size``.
    ``kernel_size`` is expected to be odd so the window has a centre pixel.
    """
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def __magic_name__ ( kernel_size : int , spatial_variance : float ) -> np.ndarray:
    """Create the spatial Gaussian kernel of side ``kernel_size``.

    Each cell first holds its Euclidean distance to the kernel centre; the
    distance matrix is then weighted with a Gaussian of ``spatial_variance``.

    Fixes the obfuscated dump (duplicate parameter names; the distance array
    was bound to a throwaway name).  NOTE(review): ``vec_gaussian`` is the
    sibling helper defined earlier in this file — the dump renamed its ``def``
    to ``__magic_name__`` as well, so that name must be restored for the
    module to run end-to-end.
    """
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def __magic_name__(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Apply a bilateral filter to *img* (edge-preserving smoothing).

    For every interior pixel, weights are the product of a fixed spatial
    Gaussian kernel and an intensity Gaussian of the differences to the
    window centre; the pixel becomes the weighted average of its window.
    Border pixels (within ``kernel_size // 2`` of an edge) are left at 0.

    Fixes the obfuscated dump (duplicate parameter names; every local was
    bound to ``__a`` while later lines read the intended names).
    NOTE(review): ``get_gauss_kernel`` / ``get_slice`` / ``vec_gaussian`` are
    the sibling helpers above, whose ``def`` names the dump also mangled.

    :param img: 2-D float image, values typically scaled to [0, 1]
    :param spatial_variance: variance of the spatial (distance) Gaussian
    :param intensity_variance: variance of the intensity-difference Gaussian
    :param kernel_size: odd window side length
    :return: filtered image, same shape as ``img``
    """
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
def __magic_name__ ( args : list ):
    """Parse the CLI arguments of the bilateral-filter demo.

    Fixes the obfuscated dump, whose assignments bound every value to ``__a``
    while the ``return`` read ``filename``, ``spatial_variance``,
    ``intensity_variance`` and ``kernel_size``.

    :param args: ``sys.argv``-style list; args[1..4] are all optional:
        image filename, spatial variance, intensity variance, kernel size
    :return: (filename, spatial_variance, intensity_variance, kernel_size);
        an even kernel size is bumped to the next odd value
    """
    filename = args[1] if args[1:] else """../image_data/lena.jpg"""
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        # force an odd kernel so the window has a well-defined centre pixel
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # NOTE(review): obfuscated dump — every assignment targets `lowercase__`
    # while the reads use the intended names (filename, img, out, ...);
    # `parse_args` / `bilateral_filter` are the sibling helpers above (whose
    # defs the dump renamed) and `np.uinta` was originally `np.uint8`.
    lowercase__ , lowercase__ , lowercase__ , lowercase__ = parse_args(sys.argv)
    lowercase__ = cva.imread(filename, 0)
    cva.imshow("input image", img)
    # scale to [0, 1] floats for filtering, then back to uint8 for display
    lowercase__ = img / 255
    lowercase__ = out.astype("float32")
    lowercase__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    lowercase__ = out * 255
    lowercase__ = np.uinta(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it defines.
# Fix: the dump bound this dict (and the conditional symbol lists and the
# LazyModule itself) to `lowercase__`, so the `_import_structure` read at the
# bottom raised NameError and the torch/tf symbols were never registered.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch-only modeling symbols
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow-only modeling symbols
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    # ... while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __magic_name__ ( ):
    """Build a small in-memory dataset fixture for the deduplication tests.

    Two near-duplicate rows ("a " * 20 and "a " * 30) plus one distinct row,
    so clustering should find exactly one 2-element duplicate cluster.

    Fix: the obfuscated dump passed the undefined name ``_lowerCamelCase`` to
    ``Dataset.from_dict`` instead of the dict built just above it.
    """
    data_dict = {
        """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
        """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
        """content""": ["""a """ * 2_0, """a """ * 3_0, """b """ * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Tests for MinHash-based duplicate-cluster detection and deduplication.
    NOTE(review): obfuscated dump — ``__snake_case`` stands for the imported
    ``TestCase``; ``__a`` / ``_lowercase`` stand for locals/arguments whose
    intended names (``ds``, ``duplicate_clusters``) are visible from the
    subsequent reads.  Runtime statements kept byte-for-byte.
    """
    def lowerCAmelCase__(self ):
        '''Clustering the two near-duplicate rows should yield one 2-element cluster at threshold 0.85.'''
        __a : Optional[int] = get_dataset()
        __a : List[Any] = make_duplicate_clusters(_lowercase , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def lowerCAmelCase__(self ):
        '''Deduplication keeps 2 rows and marks copies / extremes in the cluster metadata.'''
        __a : Tuple = get_dataset()
        __a , __a : Optional[Any] = deduplicate_dataset(_lowercase )
        self.assertEqual(len(_lowercase ) , 2 )
        print(_lowercase )
        self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , _lowercase )
| 63 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
# Emit conversion progress at INFO level; module-level logger for this script.
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def __magic_name__ ( mobilevit_name ):
    """Build a ``MobileViTConfig`` matching the named original checkpoint.

    Fixes the obfuscated dump: the parameter shadowed every later read, and
    the ``config.<attr>`` assignment targets were collapsed into throwaway
    ``__a`` bindings.  Attribute targets below are restored from the upstream
    MobileViT conversion script — confirm against it before merging.

    :param mobilevit_name: one of mobilevit_{s,xs,xxs}, optionally prefixed
        with ``deeplabv3_`` for the segmentation variants
    :return: a populated ``MobileViTConfig`` with id2label/label2id mappings
    """
    config = MobileViTConfig()
    # size of the architecture (note: the deeplabv3_ names contain the plain
    # names as substrings, so the `in` checks cover both families)
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        # the xx-small variant uses extra dropout and a smaller MV2 expansion
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("deeplabv3_"):
        # semantic-segmentation head: 21 PASCAL-VOC classes at 512px
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def __magic_name__ ( name , base_model=False ):
    """Translate one original-MobileViT checkpoint key into HF Transformers naming.

    The rewrite rules are order-sensitive: each rule updates ``name`` in place
    and later rules match the already-rewritten key (e.g. ``exp_1x1`` first
    becomes ``expand_1x1``, which a later rule then prefixes with
    ``downsampling_layer.``).

    Fixes the obfuscated dump, whose signature declared ``_lowerCamelCase``
    twice (a SyntaxError) and whose ``__a = name.replace(...)`` assignments
    discarded every rewrite while the following lines kept reading ``name``.

    :param name: original checkpoint key
    :param base_model: if True, do not prefix non-head keys with ``mobilevit.``
    :return: the converted key
    """
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")
    # the first two encoder stages keep an extra ".layer." level ...
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    # ... while stages 2-5 drop the inner index entirely
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name
    return name
def __magic_name__ ( orig_state_dict , model , base_model=False ):
    """Rewrite an original MobileViT state dict in place into the HF key layout.

    Fused q/k/v ("qkv") projection tensors are split into separate
    query/key/value weights and biases, using the attention head size read
    from *model*; every other key is renamed via the key-translation helper.

    Fixes the obfuscated dump, whose signature repeated ``_lowerCamelCase``
    (a SyntaxError) and whose ``__a`` assignments discarded the values the
    following lines read (``model_prefix``, ``val``, ``key_split``, ``dim``,
    ``prefix`` and the destination state-dict entries).

    :param orig_state_dict: checkpoint state dict to convert (mutated and returned)
    :param model: instantiated HF MobileViT model, used to look up head sizes
    :param base_model: True when converting the bare backbone (no "mobilevit." prefix)
    :return: the converted state dict
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                # fused qkv weight is stacked [q; k; v] along dim 0
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            # NOTE(review): the key-renaming helper is defined earlier in this
            # file (its def was renamed by the dump); upstream this call is
            # rename_key(key, base_model) — restore that name alongside it.
            orig_state_dict[rename_key(key, base_model)] = val
    return orig_state_dict
def __magic_name__ ( ):
    """Download the standard COCO "two cats" test image used for sanity-checking conversions.

    Fixes the obfuscated dump, which bound the URL to a throwaway name and
    then passed the undefined ``_lowerCamelCase`` to ``requests.get`` (and as
    ``stream=``, which must be True so ``.raw`` is a readable stream).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __magic_name__ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : int=False ):
    """Convert an original MobileViT checkpoint to HF format, verify its logits, save and optionally push.
    NOTE(review): obfuscated dump — the four parameters were originally
    ``mobilevit_name`` / ``checkpoint_path`` / ``pytorch_dump_folder_path`` /
    ``push_to_hub`` (the repeated ``_lowerCamelCase`` cannot compile), and the
    ``__a`` assignments bind throwaway names while later lines read the
    intended locals (``config``, ``model``, ``image_processor``, ``outputs``,
    ``logits``, ``expected_logits``, ``model_mapping``, ``hub_name``).
    Runtime statements below are kept byte-for-byte.
    """
    __a : Optional[Any] = get_mobilevit_config(_lowerCamelCase )
    # load original state_dict
    __a : List[Any] = torch.load(_lowerCamelCase , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        __a : List[str] = MobileViTForSemanticSegmentation(_lowerCamelCase ).eval()
    else:
        __a : str = MobileViTForImageClassification(_lowerCamelCase ).eval()
    __a : str = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
    model.load_state_dict(_lowerCamelCase )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    __a : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
    __a : Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
    __a : List[Any] = model(**_lowerCamelCase )
    __a : Dict = outputs.logits
    if mobilevit_name.startswith("""deeplabv3_""" ):
        # segmentation variants: 21 PASCAL-VOC classes on a 32x32 logits map
        assert logits.shape == (1, 2_1, 3_2, 3_2)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            __a : int = torch.tensor(
                [
                    [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
                    [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
                    [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            __a : Tuple = torch.tensor(
                [
                    [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
                    [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
                    [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            __a : Tuple = torch.tensor(
                [
                    [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
                    [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
                    [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
                ] )
        else:
            raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
        assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 )
    else:
        # classification variants: 1000 ImageNet classes
        assert logits.shape == (1, 1_0_0_0)
        if mobilevit_name == "mobilevit_s":
            __a : Tuple = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
        elif mobilevit_name == "mobilevit_xs":
            __a : Any = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
        elif mobilevit_name == "mobilevit_xxs":
            __a : Union[str, Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
        else:
            raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
        assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
    Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
    print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(_lowerCamelCase )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(_lowerCamelCase )
    if push_to_hub:
        # local checkpoint names -> hub repo names under the `apple` org
        __a : str = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""" )
        __a : Union[str, Any] = model_mapping[mobilevit_name]
        image_processor.push_to_hub(_lowerCamelCase , organization="""apple""" )
        model.push_to_hub(_lowerCamelCase , organization="""apple""" )
if __name__ == "__main__":
    # NOTE(review): obfuscated dump — both assignments target `lowercase__`
    # while the following lines read `parser` / `args` (the original names).
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',"
            " \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    lowercase__ = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it defines.
# Fix: the dump bound this dict (and the torch-only symbol list and the
# LazyModule itself) to `lowercase__`, so the `_import_structure` read at the
# bottom raised NameError and the modeling symbols were never registered.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch-only modeling symbols
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    # ... while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __a , unittest.TestCase ):
_lowerCAmelCase = LongformerTokenizer
_lowerCAmelCase = True
_lowerCAmelCase = LongformerTokenizerFast
_lowerCAmelCase = True
def lowerCAmelCase__(self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__a : List[str] = dict(zip(a_ , range(len(a_ ) ) ) )
__a : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__a : Any = {"""unk_token""": """<unk>"""}
__a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def lowerCAmelCase__(self , **_lowercase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
def lowerCAmelCase__(self , **_lowercase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : int = """lower newer"""
__a : Optional[int] = """lower newer"""
return input_text, output_text
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : int = """lower newer"""
__a : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__a : int = tokenizer.tokenize(a_ ) # , add_prefix_space=True)
self.assertListEqual(a_ , a_ )
__a : Any = tokens + [tokenizer.unk_token]
__a : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=a_ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=a_ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__a : Union[str, Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=a_ )
__a : Tuple = tokenizer.encode("""multi-sequence build""" , add_special_tokens=a_ )
__a : Tuple = tokenizer.encode(
"""sequence builders""" , add_special_tokens=a_ , add_prefix_space=a_ )
__a : Optional[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=a_ , add_prefix_space=a_ )
__a : List[str] = tokenizer.build_inputs_with_special_tokens(a_ )
__a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.get_tokenizer()
__a : Dict = """Encode this sequence."""
__a : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__a : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a_ , a_ )
__a : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a_ , a_ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__a : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
__a : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a_ , a_ )
# Testing spaces after special tokens
__a : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(a_ , lstrip=a_ , rstrip=a_ )} ) # mask token has a left space
__a : str = tokenizer.convert_tokens_to_ids(a_ )
__a : int = """Encode <mask> sequence"""
__a : Tuple = """Encode <mask>sequence"""
__a : List[str] = tokenizer.encode(a_ )
__a : Optional[int] = encoded.index(a_ )
__a : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a_ , a_ )
__a : Union[str, Any] = tokenizer.encode(a_ )
__a : Optional[int] = encoded.index(a_ )
__a : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a_ , a_ )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a : str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
__a : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
__a : List[str] = """A, <mask> AllenNLP sentence."""
__a : Tuple = tokenizer_r.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
__a : Optional[int] = tokenizer_p.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__a : int = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__a : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
a_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
a_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase__(self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__a : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__a : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , a_ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , a_ )
self.assertEqual(post_processor_state["""trim_offsets"""] , a_ )
def lowerCAmelCase__(self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__a : int = F'''{text_of_1_token} {text_of_1_token}'''
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : str = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
__a : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
__a : Tuple = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
__a : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : Any = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
__a : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__a : Optional[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
| 706 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    '''Names of the supported learning-rate schedules (string-valued enum members).'''

    # Member names restored: TYPE_TO_SCHEDULER_FUNCTION and get_scheduler below look
    # these up as SchedulerType.LINEAR, SchedulerType.COSINE, ... -- previously every
    # value was bound to the same `_lowerCAmelCase` attribute, so only the last one
    # survived and those lookups raised AttributeError.
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


# Alias under the name the rest of this module actually uses.
SchedulerType = SCREAMING_SNAKE_CASE__
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate (multiplier always 1).

    Args:
        optimizer: Optimizer whose learning rate is scheduled.
        last_epoch: Index of the last epoch when resuming training.

    Fix: the original signature declared `_lowerCamelCase` twice (SyntaxError);
    the canonical name from TYPE_TO_SCHEDULER_FUNCTION below is restored.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup from 0.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and an inner lambda
    whose body read `current_step`/`num_warmup_steps` that were never bound.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear ramp; max(1.0, ...) guards against num_warmup_steps == 0.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multiplier schedule described by a rule string.

    `step_rules` looks like "1:10,20:0.1,0.01": multiplier 10 before step 1,
    0.1 before step 20, and 0.01 (the trailing field) from then on.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) plus locals bound to
    `__a` but read as `rules_dict`/`sorted_steps`/`steps` (NameError).
    """
    rules_dict = {}
    rule_list = step_rules.split(""",""")
    for rule_str in rule_list[:-1]:
        step_str, mult_str = rule_str.split(""":""")
        step_boundary = int(step_str)
        rules_dict[step_boundary] = float(mult_str)
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last boundary: use the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup from 0 followed by a linear decay to 0 at `num_training_steps`.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError); the inner lambda's
    references (`current_step`, `num_warmup_steps`, `num_training_steps`) identify
    the intended parameter names.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay (`num_cycles` half-waves, default
    half a cosine, i.e. decay to 0 at `num_training_steps`).

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and unbound names in
    the inner lambda; intended names restored from the body's references.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then `num_cycles` hard cosine restarts, each decaying to 0.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and unbound names in
    the inner lambda; intended names restored from the body's references.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo produces the hard restart at each cycle boundary.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the optimizer's initial
    lr down to `lr_end` at `num_training_steps` (constant `lr_end` afterwards).

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and unbound names
    (`lr_init`, `lr_end`, ...); intended names restored from the body's references.
    """
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''')

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table used by `get_scheduler`: one factory per SchedulerType member.
# Name restored -- `get_scheduler` reads `TYPE_TO_SCHEDULER_FUNCTION`, but this
# mapping was previously bound to the throwaway name `lowercase__` (NameError at
# the lookup site).
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: look up the schedule named `name` in
    TYPE_TO_SCHEDULER_FUNCTION and forward only the arguments it needs.

    Raises:
        ValueError: if a schedule requires `num_warmup_steps` or
            `num_training_steps` and the argument was not provided.

    Fix: the original signature declared `_lowerCamelCase` eight times
    (SyntaxError); parameter names restored from the body's references.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''')
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 63 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    '''Processor tests: save/load round-trips and tokenizer / image-processor parity.

    Fixes: every local was bound to `__a` and then read back under other names,
    helper calls referenced undefined `__lowerCAmelCase`, and `np.uinta` is not a
    numpy dtype (restored to `np.uint8`).  The helper methods are renamed to
    `setUp`/`tearDown`/`get_tokenizer`/`get_image_processor`/`prepare_image_inputs`
    because the test bodies below call them under exactly those names.
    '''

    def setUp(self ):
        '''Save a processor built from tiny components into a fresh temp dir.'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        processor = BlipaProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer(self , **kwargs ):
        '''Reload the tokenizer (optionally with overrides) from the temp dir.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor(self , **kwargs ):
        '''Reload the image processor (optionally with overrides) from the temp dir.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self ):
        '''Return a list with one random PIL image (moveaxis: CHW -> HWC).'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self ):
        '''from_pretrained kwargs must override the saved tokenizer / image processor.'''
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )

    def test_image_processor(self ):
        '''Processing images through the processor must match the raw image processor.'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer(self ):
        '''Processing text through the processor must match the raw tokenizer.'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor(self ):
        '''Joint text+image call returns the expected keys; empty call raises.'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode(self ):
        '''`processor.batch_decode` must delegate to `tokenizer.batch_decode`.'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self ):
        '''The processor must expose exactly the model's expected input names.'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 707 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config from `config_path`; optionally pretty-print it.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and the config bound
    to `__a` but dumped via an unbound name.  Restored name: the function is called
    as `load_config` inside `load_vqgan` below.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a YAML config, load checkpoint weights, move it to `device`.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) plus locals bound to
    `__a` but read as `conf_path`/`ckpt_path`/`sd`.  NOTE(review): `display=False`
    and `strict=True` are the conventional values here -- the originals were
    unbound names, so confirm against the source notebook.
    """
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """Encode `x` with the VQGAN, log the latent shape, and decode it back.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and the latent tuple
    bound to `__a` three times but read as `z`/`xrec`.
    """
    # encode() is expected to return (latent, emb_loss, info); only the latent is used.
    z, _, _ = model.encode(x)
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''')
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like "pkg.mod.Name" to the named attribute.

    Args:
        string: Fully qualified "module.attribute" path.
        reload: If True, re-import the module before resolving.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and the rsplit result
    bound to `__a` but read as `module`/`cls`.
    """
    module, cls = string.rsplit(""".""", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Instantiate `config["target"]` with keyword args from `config["params"]`.

    Raises:
        KeyError: if the config has no "target" entry.
    """
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""")
    # params is optional; default to no constructor arguments.
    return get_obj_from_str(config["""target"""])(**config.get("""params""", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build the model described by `config`; optionally load state dict `sd`,
    move it to GPU and switch to eval mode.  Returns {"model": model}.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and the model bound
    to `__a` but used as `model`.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load an (optional) Lightning checkpoint and build its model.

    Returns:
        (model, global_step) -- global_step is None when no checkpoint is given.

    Fix: duplicate `_lowerCamelCase` parameters (SyntaxError) and locals bound to
    `__a` but read as `pl_sd`/`global_step`.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="""cpu""")
        global_step = pl_sd["""global_step"""]
        print(F'''loaded model from global step {global_step}.''')
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["""state_dict"""], gpu=gpu, eval_mode=eval_mode)["""model"""]
    return model, global_step
| 63 | 0 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class SCREAMING_SNAKE_CASE__ :
    # NOTE(review): intentionally empty -- presumably a placeholder that only
    # exercises the `require_onnxruntime` gate (skips when onnxruntime is
    # missing); confirm no test methods were lost from the original.
    pass
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the LLaMA sub-package: names are materialized on first access.
# Fix: every optional branch rebound the single module-level name `lowercase__`,
# clobbering the dict, while `_LazyModule` (last line) reads `_import_structure`
# (NameError).  The per-submodule keys are restored from the TYPE_CHECKING imports.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
__a : List[str] = 0
if start < end:
__a : Tuple = randint(_lowercase , _lowercase )
__a : List[str] = a[end]
__a : str = a[pivot]
__a : Optional[int] = temp
__a : Dict = _in_place_partition(_lowercase , _lowercase , _lowercase )
count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 )
count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase )
return count
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
__a : Union[str, Any] = 0
__a : List[Any] = randint(_lowercase , _lowercase )
__a : int = a[end]
__a : List[str] = a[pivot]
__a : Tuple = temp
__a : Union[str, Any] = start - 1
for index in range(_lowercase , _lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__a : List[str] = new_pivot_index + 1
__a : Optional[int] = a[new_pivot_index]
__a : Union[str, Any] = a[index]
__a : List[Any] = temp
__a : Tuple = a[new_pivot_index + 1]
__a : str = a[end]
__a : Dict = temp
return new_pivot_index + 1, count
# Demo: draw 100 standard-normal samples, round-trip them through a temp file,
# sort in place and report the comparison count.
# Fix: every value was bound to the same name `lowercase__`, so `np.random.normal`,
# `np.load` and the sort received undefined/overwritten arguments; the intended
# distinct names are restored from how each value is used below.
outfile = TemporaryFile()
p = 100  # number of elements to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
| 709 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config URLs.
# NOTE(review): bound to the generic name `lowercase__`, which later statements in
# this file rebind; nothing below reads this dict, but upstream exposes it under a
# *_PRETRAINED_CONFIG_ARCHIVE_MAP name -- confirm before renaming.
lowercase__ = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    '''Configuration for a UniSpeech model.

    Fixes: `__init__` declared `_lowercase` 47 times (SyntaxError) and every
    attribute assignment bound `__a` instead of `self.<name>`; parameter names are
    restored from the right-hand sides of those assignments.  The class attribute
    is renamed to `model_type`, the identifier the config machinery reads.
    '''

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self ):
        '''Total downsampling factor of the convolutional feature extractor.'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 63 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Map of canonical LeViT checkpoint names to their hosted config URLs.
# NOTE(review): bound to the generic name `lowercase__` (re-used elsewhere in this
# file); upstream exposes it under a *_PRETRAINED_CONFIG_ARCHIVE_MAP name --
# confirm before renaming.
lowercase__ = {
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    '''Configuration for a LeViT model.

    Fixes: the base class was the undefined name `UpperCAmelCase__` (the only
    config base imported above is `PretrainedConfig`); `__init__` declared
    `_lowercase` 14 times (SyntaxError); attributes were bound to `__a` instead of
    `self.<name>`; `**__lowerCAmelCase` was undefined.  `model_type` is the
    attribute name the config machinery reads.
    '''

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ("Subsample") operations between the three stages.
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class SCREAMING_SNAKE_CASE__ ( OnnxConfig ):
    '''ONNX export configuration for LeViT.

    Fixes: the base class was the undefined name `UpperCAmelCase__` (`OnnxConfig`
    is the ONNX base imported above); both properties were named
    `lowerCAmelCase__` so the second clobbered the first -- restored to the
    `inputs` / `atol_for_validation` names the OnnxConfig interface defines, and
    the class attribute to `torch_onnx_minimum_version`.
    '''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self ):
        '''Single image input with the standard NCHW dynamic axes.'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self ):
        '''Absolute tolerance used when validating the exported model.'''
        return 1e-4
| 710 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds tiny EfficientFormer configurations and dummy inputs for the TF tests.

    NOTE(review): this class appears machine-mangled. The constructor declares
    every parameter as `_lowercase` (duplicate argument names are a SyntaxError
    in Python), and each `__a : ... = value` statement binds a throwaway local
    instead of the `self.<name>` attribute that the methods below read
    (`self.batch_size`, `self.seq_length`, ...). The right-hand-side names give
    the intended attribute for each line; the exact parameter order cannot be
    fully reconstructed from this view — confirm against the upstream test file.
    """

    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Store the (tiny) model hyper-parameters used to build test configs."""
        __a : str = parent  # intended: self.parent (the unittest.TestCase)
        __a : List[Any] = batch_size  # intended: self.batch_size
        __a : int = image_size  # intended: self.image_size
        __a : Tuple = patch_size  # intended: self.patch_size
        __a : str = num_channels  # intended: self.num_channels
        __a : Union[str, Any] = is_training  # intended: self.is_training
        __a : List[Any] = use_labels  # intended: self.use_labels
        __a : int = hidden_size  # intended: self.hidden_size
        __a : Optional[Any] = num_hidden_layers  # intended: self.num_hidden_layers
        __a : List[Any] = num_attention_heads  # intended: self.num_attention_heads
        __a : Dict = intermediate_size  # intended: self.intermediate_size
        __a : str = hidden_act  # intended: self.hidden_act
        __a : Dict = hidden_dropout_prob  # intended: self.hidden_dropout_prob
        __a : str = attention_probs_dropout_prob  # intended: self.attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size  # intended: self.type_sequence_label_size
        __a : Dict = initializer_range  # intended: self.initializer_range
        __a : Dict = encoder_stride  # intended: self.encoder_stride
        __a : int = num_attention_outputs  # intended: self.num_attention_outputs
        __a : List[Any] = embed_dim  # intended: self.embed_dim
        __a : Optional[Any] = embed_dim + 1  # intended: self.seq_length — presumably +1 for a [CLS]-style token; confirm
        __a : Optional[Any] = resolution  # intended: self.resolution
        __a : Optional[Any] = depths  # intended: self.depths
        __a : Union[str, Any] = hidden_sizes  # intended: self.hidden_sizes
        __a : List[str] = dim  # intended: self.dim
        __a : Any = mlp_expansion_ratio  # intended: self.mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        """Create a config plus a random pixel_values batch (and labels when enabled).

        NOTE(review): the locals here were mangled to `__a`; the `return`
        statement reads the intended names `config, pixel_values, labels`.
        """
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        """Return an EfficientFormerConfig built from this tester's attributes."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Instantiate the base model and check the last_hidden_state shape.

        NOTE(review): `model` / `result` below were mangled to `__a` — the
        assertions read the intended names.
        """
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Instantiate the classification head and check logits shapes (RGB and greyscale)."""
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        """Split prepared inputs into (config, inputs_dict) for the common tests.

        NOTE(review): the tuple unpack was mangled — intended:
        `config, pixel_values, labels = config_and_inputs`.
        """
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = TFEfficientFormerModelTester(self )
__a : Any = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.asseretIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
__a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__a : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__a : List[Any] = True
__a : Tuple = False
__a : List[Any] = True
__a : int = model_class(_lowercase )
__a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[Any] = True
__a : List[str] = model_class(_lowercase )
__a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__a : Dict = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__a : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__a : Optional[Any] = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    Fixes vs. the obfuscated original: the function was named `__magic_name__`
    although the integration test below calls `prepare_img()`, and the opened
    image was bound to a throwaway local (`__a`) while the undefined name
    `image` was returned (NameError at call time).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests: run the released EfficientFormer-L1 checkpoint on
    the COCO cats fixture and compare a logits slice against reference values.

    Fixes vs. the obfuscated original: results were bound to throwaway `__a`
    locals, and the forward passes were called with `training=_lowercase`, an
    undefined name (restored to `training=False` — inference mode).

    NOTE(review): both test methods share the obfuscated name `lowerCAmelCase__`
    (the second shadows the first) and neither starts with `test`, so unittest
    discovery never runs them; the cached property below is read through
    `self.default_image_processor`, so it was presumably named
    `default_image_processor` originally — restore the real names file-wide.
    """

    @cached_property
    def lowerCAmelCase__(self ):
        """Image processor for the checkpoint, or None without vision deps."""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCAmelCase__(self ):
        """Check the classification-head logits of the plain L1 checkpoint."""
        model = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def lowerCAmelCase__(self ):
        """Check the distillation ("with teacher") head logits of the L1 checkpoint."""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 63 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Configuration read by the loader functions below. Fix: the obfuscated
# original bound both values to the same throwaway name `lowercase__`, while
# the code below reads `MODEL_TYPE` and `LOAD_DENSE_INDEX` (NameError at runtime).
MODEL_TYPE = "bart"  # which seq2seq answer generator to load ("bart" or the T5 fallback)
LOAD_DENSE_INDEX = True  # load the dense retrieval machinery (GPU + on-disk faiss index files)
@st.cache(allow_output_mutation=True)  # fix: decorator arg was the undefined `_snake_case`
def load_models():
    """Load the question-embedding retriever and the seq2seq answer generator.

    Fixes vs. the obfuscated original: the function was named `__magic_name__`
    (the module-level call below uses `load_models()`), models were bound to
    throwaway `__a` locals while later lines read `qar_model` / `sas_model` /
    `save_dict`, and the else-branch tuple unpack was mangled.

    Returns:
        (qar_tokenizer, qar_model, sas_tokenizer, sas_model); the retriever pair
        is (None, None) when LOAD_DENSE_INDEX is off.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
        sas_model.load_state_dict(save_dict["""model"""] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)  # fix: decorator arg was the undefined `_snake_case`
def load_indexes():
    """Load the wiki40b passage dataset, its (GPU) faiss index, and the ES client.

    Fixes vs. the obfuscated original: the function was named `__magic_name__`
    (the module-level call below uses `load_indexes()`), and the faiss
    resources/flat index were bound to throwaway `__a` locals while the
    `index_cpu_to_gpu` call passed the undefined `_snake_case`.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
        wikiaab_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
        wikiaab_index_flat = faiss.IndexFlatIP(128 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)  # fix: decorator arg was the undefined `_snake_case`
def load_train_data():
    """Load the ELI5 training split and a faiss index over its question embeddings.

    Fixes vs. the obfuscated original: the function was named `__magic_name__`
    (the module-level call below uses `load_train_data()`), and locals were
    bound to `__a` while later lines read `elia` / `elia_train` /
    `eli5_train_q_index`.
    """
    elia = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    elia_train = elia["""train_eli5"""]
    elia_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
# Bind the cached resources to the module-level names that the retrieval and
# generation helpers above read. Fix: the obfuscated original unpacked every
# result into the same throwaway name `lowercase__`, so `qar_model`,
# `es_client`, `eli5_train_q_index`, etc. were never defined.
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples nearest to `question`.

    Fixes vs. the obfuscated original: the function was named `__magic_name__`
    (the UI code below calls `find_nearest_training(question)`), and every
    argument to the embedding/search calls was the undefined `_snake_case`.
    Uses the module-level `qar_tokenizer` / `qar_model` / `eli5_train_q_index` /
    `elia_train` bound by the loaders above.
    """
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for `question` and build the seq2seq input.

    Fixes vs. the obfuscated original: the function was named `__magic_name__`
    (the UI code below calls `make_support(...)`), and the retrieval calls
    passed the undefined `_snake_case` instead of the question, the module-level
    retriever objects, and `n_results`.

    Returns:
        (question_doc, support_list) where question_doc is the
        "question: ... context: ..." string fed to the generator and
        support_list is a list of (article_title, section_title, score,
        passage_text) tuples.
    """
    if source == "none":
        # Empty context: eleven empty passages joined by the <P> separator.
        support_doc, hit_lst = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question , es_client , index_name="""english_wiki40b_snippets_100w""" , n_results=n_results , )
    support_list = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    question_doc = """question: {} context: {}""".format(question , support_doc )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate a long-form answer for the retrieved `question_doc`.

    Fixes vs. the obfuscated original: every positional/keyword argument to
    `qa_sas_generate` was the undefined `_snake_case`; restored to the function
    parameters (and `top_k=None`, `num_answers=1`).

    NOTE(review): the `support_list` in the return statement is a module-level
    variable set by the UI code below before this function is called (kept from
    the original) — consider returning only `answer` instead.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc ,
            sas_model ,
            sas_tokenizer ,
            num_answers=1 ,
            num_beams=n_beams ,
            min_len=min_len ,
            max_len=max_len ,
            do_sample=sampling ,
            temp=temp ,
            top_p=top_p ,
            top_k=None ,
            max_input_length=1024 ,
            device="""cuda:0""" , )[0]
    return (answer, support_list)
# --- Streamlit page body ---------------------------------------------------
# NOTE(review): from here on the script is machine-mangled: every module-level
# assignment binds the same throwaway name `lowercase__`, while later lines
# read the originally intended names (header_html, header_full, description,
# action_list, action, question, answer, support_list, ...). Each assignment's
# intended target can be recovered from the first read that follows it; until
# the names are restored, these reads fail with NameError at runtime.
st.title("Long Form Question Answering with ELI5")
# Start sidebar
lowercase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
lowercase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowercase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
lowercase__ = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
# Sidebar: demo display options (which panels to render).
lowercase__ = st.sidebar.checkbox("Demo options")
if demo_options:
    lowercase__ = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    lowercase__ = action_list.index(action_st)
    lowercase__ = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    lowercase__ = show_type == "Show full text of passages"
else:
    lowercase__ = 3
    lowercase__ = True
# Sidebar: retrieval configuration (sparse vs dense index, wiki40b vs none).
lowercase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    lowercase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    lowercase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    lowercase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    lowercase__ = "wiki40b"
    lowercase__ = "dense"
# Generation defaults (overridden below when "Generation options" is checked).
lowercase__ = "beam"
lowercase__ = 2
lowercase__ = 64
lowercase__ = 256
lowercase__ = None
lowercase__ = None
lowercase__ = st.sidebar.checkbox("Generation options")
if generate_options:
    lowercase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    lowercase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    lowercase__ = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    lowercase__ = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        lowercase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        lowercase__ = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        lowercase__ = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        lowercase__ = None
# start main text
lowercase__ = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
lowercase__ = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    lowercase__ = st.text_input("Enter your question here:", "")
else:
    lowercase__ = question_s
# Main action: retrieve support passages, generate an answer, and/or show the
# nearest ELI5 training example, depending on the selected `action`.
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            lowercase__ , lowercase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
            lowercase__ , lowercase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
            lowercase__ = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            lowercase__ = support_list[:10]
            lowercase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            lowercase__ , lowercase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        lowercase__ , lowercase__ = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            lowercase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            lowercase__ = res[1].strip()
            if sec_titles == "":
                lowercase__ = "[{}]({})".format(res[0], wiki_url)
            else:
                lowercase__ = sec_titles.split(" & ")
                lowercase__ = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        lowercase__ = find_nearest_training(question)
        lowercase__ = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        lowercase__ = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
lowercase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 711 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( TransformedDistribution ):
    """Distribution of `scale * X + loc` for a base distribution of X.

    Fixes vs. the obfuscated original: the base class was the undefined name
    `__snake_case` (restored to `TransformedDistribution`, imported at the top
    of this module and otherwise unused), the constructor declared duplicate
    `_lowercase` parameters (a SyntaxError) while its body already read
    `loc`/`scale`, the values were bound to throwaway `__a` locals although the
    `super().__init__` call reads `self.loc`/`self.scale`, and the three
    properties all shared the name `lowerCAmelCase__` so only the last binding
    survived (restored to `mean`/`variance`/`stddev`, the Distribution API).
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        """Wrap `base_distribution` with an affine transform (identity by default)."""
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution: scale * base_mean + loc."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution: scale**2 * base_variance."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()

    # Preserve the obfuscated name that was the effective binding in the original.
    lowerCAmelCase__ = stddev


# Alias matching the name the rest of this module uses (DistributionOutput's
# `distribution` method below calls `AffineTransformed`).
AffineTransformed = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Projects hidden states onto the (constrained) parameters of a distribution.

    One linear head per entry of `args_dim`; raw outputs are mapped into the
    valid parameter domain by `domain_map`.

    Fixes vs. the obfuscated original: the constructor declared duplicate
    `_lowercase` parameters (a SyntaxError) while its body already read
    `args_dim`/`domain_map`, and the values were bound to throwaway `__a`
    locals although `forward` reads `self.proj`/`self.domain_map`.
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One unbounded linear head per distribution parameter.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, _lowercase):
        """Apply every head to the input and constrain the raw outputs."""
        params_unbounded = [proj(_lowercase) for proj in self.proj]
        return self.domain_map(*params_unbounded)

    # Preserve the obfuscated method name used by the original file.
    lowerCAmelCase__ = forward


# Alias matching the name the rest of this module uses (see
# DistributionOutput.get_parameter_projection below).
ParameterProjection = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Wraps a plain callable as an nn.Module so it can live inside a model.

    Fixes vs. the obfuscated original: `function` was bound to a throwaway
    `__a` local although `forward` reads `self.function`, and the forward
    method declared `_lowercase` twice (`_lowercase , *_lowercase` — a
    duplicate-argument SyntaxError).
    """

    def __init__(self, _lowercase):
        super().__init__()
        self.function = _lowercase

    def forward(self, _lowercase, *args):
        """Call the wrapped function with the input and any extra arguments."""
        return self.function(_lowercase, *args)

    # Preserve the obfuscated method name used by the original file.
    lowerCAmelCase__ = forward


# Alias matching the name the rest of this module uses (see
# DistributionOutput.get_parameter_projection below).
LambdaLayer = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ :
    """Base class for distribution output heads.

    Subclasses declare `args_dim` (parameter name -> per-dimension size) and
    `distribution_class`, and implement `domain_map` to constrain raw
    projections into valid parameter ranges.

    Fixes vs. the obfuscated original: the three class-level annotations were
    collapsed into `_lowerCAmelCase = 42` placeholders although the method
    bodies read `self.args_dim` / `self.distribution_class`; `__init__` bound
    its values to throwaway `__a` locals; the `distribution` method declared
    duplicate `_lowercase` parameters (a SyntaxError); and several methods
    shared the name `lowerCAmelCase__` so only the last binding survived.
    Method names are restored from this module's own call sites
    (`self._base_distribution`, `self.event_dim`, `self.event_shape`,
    `self.domain_map`, `cls.squareplus`).
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, _lowercase=1):
        """Store the output dimension and scale `args_dim` accordingly."""
        self.dim = _lowercase
        self.args_dim = {k: _lowercase * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, _lowercase):
        """Build the raw (unshifted/unscaled) distribution from its parameters."""
        if self.dim == 1:
            return self.distribution_class(*_lowercase)
        else:
            # Treat the trailing `dim` axis as a single event.
            return Independent(self.distribution_class(*_lowercase), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Build the output distribution, optionally affinely transformed by loc/scale."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self):
        """Shape of each individual event: () for scalar output, (dim,) otherwise."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self):
        """Number of event dimensions."""
        return len(self.event_shape)

    @property
    def value_in_support(self):
        """A value guaranteed to lie in the distribution's support (used for padding)."""
        return 0.0

    def get_parameter_projection(self, _lowercase):
        """Return the projection module mapping `in_features` to this head's parameters."""
        return ParameterProjection(
            in_features=_lowercase, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *_lowercase):
        """Constrain raw projections into valid parameter ranges (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(_lowercase):
        """Smooth positive mapping: (x + sqrt(x**2 + 4)) / 2 (softplus-like, squareplus)."""
        return (_lowercase + torch.sqrt(torch.square(_lowercase) + 4.0)) / 2.0

    # Preserve the obfuscated name; in the original the last `lowerCAmelCase__`
    # binding was this static method.
    lowerCAmelCase__ = squareplus


# Alias documenting the de-mangled class name for readers of this module.
DistributionOutput = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):  # base: the DistributionOutput class defined just above
    """Student's t-distribution output head.

    Fixes vs. the obfuscated original: the base class was the undefined name
    `__snake_case` (at this point `SCREAMING_SNAKE_CASE__` is still bound to the
    DistributionOutput base above, so that binding is used); the two class
    attributes were both named `_lowerCAmelCase` although the base class reads
    `self.args_dim` / `self.distribution_class`; and `domain_map` declared
    duplicate `_lowercase` parameters (a SyntaxError) while its body already
    read `scale.dtype`.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        """Constrain raw projections: scale > 0, df > 2; squeeze the parameter axis."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):  # base: the DistributionOutput class defined above
    """Normal-distribution output head.

    Fixes vs. the obfuscated original: undefined base name `__snake_case`
    (restored to the DistributionOutput base, still bound to
    `SCREAMING_SNAKE_CASE__` at this point), class attributes renamed from the
    `_lowerCAmelCase` placeholders to the `args_dim`/`distribution_class` names
    the base class reads, and the duplicate `_lowercase` parameters of
    `domain_map` (a SyntaxError) replaced by the names its body already read.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        """Constrain raw projections: scale > 0; squeeze the parameter axis."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):  # base: the DistributionOutput class defined above
    """Negative-binomial output head (integer counts).

    Fixes vs. the obfuscated original: undefined base name `__snake_case`
    (restored to the DistributionOutput base, still bound to
    `SCREAMING_SNAKE_CASE__` at this point), class attributes renamed from the
    `_lowerCAmelCase` placeholders to the names the base class reads, the
    shadowed `lowerCAmelCase__` methods renamed to the `_base_distribution` /
    `distribution` overrides the base's call sites expect, duplicate
    `_lowercase` parameters removed, and mangled `__a` tuple unpacks restored
    to the `total_count, logits` names the bodies already read.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        """Constrain raw projections: total_count > 0; squeeze the parameter axis."""
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, _lowercase):
        """Build the raw NegativeBinomial (wrapped in Independent for dim > 1)."""
        total_count, logits = _lowercase
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Overrides the base: counts cannot be affinely transformed, so the
        scale is folded into the logits instead."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 63 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SCREAMING_SNAKE_CASE__ ( PipelineTool ):
    """Speech-to-text tool backed by openai/whisper-base.

    Fixes vs. the obfuscated original: the base class was the undefined name
    `UpperCamelCase_` (this module imports `PipelineTool` for exactly this
    purpose), and every method body referenced the undefined `UpperCamelCase__`
    instead of its `_lowercase` parameter (with `skip_special_tokens=True`
    restored for decoding).

    NOTE(review): the three methods below all share the obfuscated name
    `lowerCAmelCase__`, so only the last binding survives; they were presumably
    the `encode` / `forward` / `decode` hooks of the PipelineTool protocol —
    restore the real names for the tool to work end to end.
    """

    _lowerCAmelCase = "openai/whisper-base"
    _lowerCAmelCase = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    _lowerCAmelCase = "transcriber"
    _lowerCAmelCase = WhisperProcessor
    _lowerCAmelCase = WhisperForConditionalGeneration
    _lowerCAmelCase = ["audio"]
    _lowerCAmelCase = ["text"]

    def lowerCAmelCase__(self , _lowercase ):
        """Turn raw audio into Whisper input features."""
        return self.pre_processor(_lowercase , return_tensors="""pt""" ).input_features

    def lowerCAmelCase__(self , _lowercase ):
        """Run generation on the prepared input features."""
        return self.model.generate(inputs=_lowercase )

    def lowerCAmelCase__(self , _lowercase ):
        """Decode generated token ids into the transcribed string."""
        return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=True )[0]
| 712 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast tests for the Kandinsky V2.2 prior pipeline with tiny dummy models.

    NOTE(review): the original code named every property/method with the same
    obfuscated identifier (so later defs shadowed earlier ones) and bound all
    locals to `__a` while referencing the real names (NameError). Names are
    restored to the conventional diffusers test layout the mixin expects —
    verify against the PipelineTesterMixin hooks.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        # Hidden/projection size shared by the dummy text and image encoders.
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        """Build the full set of tiny components the prior pipeline needs."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators, so fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        # Same call with return_dict=False must produce the same embeddings as a tuple.
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 63 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

# File names the tokenizer machinery looks for inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# NOTE(review): the original bound every constant to the same name (`lowercase__`),
# leaving the names the class below references undefined; restored here. Two URL
# typos ("aresolve", "tokenizer.jsont") are also fixed.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths, keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast (tokenizers-backed) REALM tokenizer, a WordPiece tokenizer à la BERT.

    NOTE(review): the original defined `__init__` with all parameters named
    `_lowercase` (a SyntaxError: duplicate argument) and referenced an undefined
    `_SCREAMING_SNAKE_CASE` throughout; parameter names and in-body references
    are restored to the conventional transformers layout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if the serialized state disagrees with
        # the options requested here (same dance as BertTokenizerFast).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts, always padding to max length.

        Returns a BatchEncoding whose values are lists of per-candidate encodings;
        keys with no data are dropped before tensor conversion.
        """
        # Always pad so candidates of one example line up into a rectangular batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            # Encode each candidate individually; tensor conversion happens once at the end.
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 713 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Tokenization tests for LED (byte-level BPE, shared with BART).

    NOTE(review): method names and locals restored — the original gave every
    method the same obfuscated name (later defs shadowed earlier ones, and no
    `test_*` method existed for unittest to discover) and referenced undefined
    names such as `_lowercase` inside `setUp`.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Tiny BPE vocab/merges written to tmpdir so from_pretrained can load them.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # Without targets, no label tensors should be produced.
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            # Truncated to the model max length of 5122 (16384-capable checkpoint pads to multiple).
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            # Fixed: the original comprehension used an undefined name instead of len(x).
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Byte-level BPE tokenizers do not support pretokenized inputs; intentionally skipped.
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # Slow and fast tokenizers must agree on token types and attention mask.
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 63 | 0 |
"""simple docstring"""
import os
import sys
lowercase__ = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowercase__ = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
# torch.hub entry points. Fixed: all seven factories were named `__magic_name__`,
# so only the last definition survived; restored to the conventional hubconf names.
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Instantiate a configuration via AutoConfig."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Instantiate a tokenizer via AutoTokenizer."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Instantiate a bare model via AutoModel."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Instantiate a causal-LM head model."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Instantiate a masked-LM head model."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Instantiate a sequence-classification head model."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Instantiate a question-answering head model."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
lowercase__ = parser.parse_args()
lowercase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): every constant was bound to the same name (`lowercase__`), leaving
# the names referenced by the tokenizer class below undefined; restored here.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

# SentencePiece's meta-symbol marking a word boundary.
SPIECE_UNDERLINE = "▁"
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
    """SentencePiece-based T5 tokenizer with `<extra_id_N>` sentinel tokens.

    NOTE(review): restored from the obfuscated original, which had duplicate
    `_lowercase` parameters (SyntaxError), a string constant as its base class,
    duplicate method names, and undefined `__lowercase`/`TaTokenizer` references.
    Sentinel ids occupy the top of the id space: <extra_id_0> maps to
    vocab_size - 1, <extra_id_1> to vocab_size - 2, and so on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs=None,
        legacy=True,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        # Back-compat shim: warn when relying on the deprecated 512 default length.
        if pretrained_model_name_or_path in SCREAMING_SNAKE_CASE__.max_model_input_sizes:
            deprecated_max_model_length = SCREAMING_SNAKE_CASE__.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self):
        # SentencePiece pieces plus the reserved sentinel ids.
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions (the appended EOS)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Append EOS unless the sequence already ends with it (then just warn)."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # T5 does not use token types, so everything is segment 0.
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text, **kwargs):
        # Non-legacy mode: prefix with the SP underline so leading-space handling is consistent.
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        # Strip the artificial leading underline added in tokenize() when appropriate.
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = F'''<extra_id_{self.vocab_size - 1 - index}>'''
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or re-serialize) the SentencePiece model into save_directory."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 715 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Toy custom pipeline used to exercise DiffusionPipeline loading.

    Runs a single UNet + scheduler step on random noise and returns a
    deterministic tensor of ones shaped like the scheduler output.
    """

    def __init__(self , unet , scheduler ):
        super().__init__()
        # Register components so save/load and device placement work.
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__(self ):
        # Random latent sized from the registered UNet config.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # x - x + 1 == 1 elementwise: output is all ones regardless of the random input.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 63 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Bark-style processor: wraps an AutoTokenizer plus optional speaker-embedding
    voice presets that are loaded from / saved to ``.npy`` files and a JSON index.

    NOTE(review): the method signatures below repeat the mangled parameter name
    ``_lowercase`` (a duplicate-argument SyntaxError) and the bodies reference
    ``A_`` and other names that are never bound. The original argument names must
    be restored before this class can execute; comments below describe the
    apparent intent only.
    """
    # NOTE(review): these three assignments all rebind the same mangled name, so only
    # the final dict survives; originally they were distinct class attributes
    # (tokenizer class name, processor attributes, per-key expected array rank).
    _lowerCAmelCase = 'AutoTokenizer'
    _lowerCAmelCase = ['tokenizer']
    _lowerCAmelCase = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__(self , _lowercase , _lowercase=None ):
        """Store the tokenizer (via the base processor) and the optional
        speaker-embeddings dict."""
        super().__init__(A_ )
        __a : List[Any] = speaker_embeddings
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase="speaker_embeddings_path.json" , **_lowercase ):
        """Load the tokenizer and, when available, the speaker-embeddings JSON
        index from a repo or local path; missing index only logs a warning."""
        if speaker_embeddings_dict_path is not None:
            __a : Any = get_file_from_repo(
                A_ , A_ , subfolder=kwargs.pop("""subfolder""" , A_ ) , cache_dir=kwargs.pop("""cache_dir""" , A_ ) , force_download=kwargs.pop("""force_download""" , A_ ) , proxies=kwargs.pop("""proxies""" , A_ ) , resume_download=kwargs.pop("""resume_download""" , A_ ) , local_files_only=kwargs.pop("""local_files_only""" , A_ ) , use_auth_token=kwargs.pop("""use_auth_token""" , A_ ) , revision=kwargs.pop("""revision""" , A_ ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(A_ , A_ )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                __a : Optional[int] = None
            else:
                with open(A_ ) as speaker_embeddings_json:
                    __a : Optional[Any] = json.load(A_ )
        else:
            __a : Any = None
        __a : Optional[int] = AutoTokenizer.from_pretrained(A_ , **A_ )
        return cls(tokenizer=A_ , speaker_embeddings=A_ )
    def lowerCAmelCase__(self , _lowercase , _lowercase="speaker_embeddings_path.json" , _lowercase="speaker_embeddings" , _lowercase = False , **_lowercase , ):
        """Serialize each voice preset as .npy files plus a JSON index, then defer
        to the base-class save."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(A_ , A_ , """v2""" ) , exist_ok=A_ )
            __a : Dict = {}
            __a : int = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    __a : Any = self._load_voice_preset(A_ )
                    __a : List[Any] = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""] , A_ , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=A_ , )
                        __a : Any = os.path.join(A_ , F'''{prompt_key}_{key}.npy''' )
                    __a : List[Any] = tmp_dict
            with open(os.path.join(A_ , A_ ) , """w""" ) as fp:
                json.dump(A_ , A_ )
        super().save_pretrained(A_ , A_ , **A_ )
    def lowerCAmelCase__(self , _lowercase = None , **_lowercase ):
        """Resolve one named voice preset to a dict of numpy arrays fetched from
        the repo; raises ValueError when a required key or file is missing."""
        __a : Optional[int] = self.speaker_embeddings[voice_preset]
        __a : Dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            __a : int = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , A_ ) , cache_dir=kwargs.pop("""cache_dir""" , A_ ) , force_download=kwargs.pop("""force_download""" , A_ ) , proxies=kwargs.pop("""proxies""" , A_ ) , resume_download=kwargs.pop("""resume_download""" , A_ ) , local_files_only=kwargs.pop("""local_files_only""" , A_ ) , use_auth_token=kwargs.pop("""use_auth_token""" , A_ ) , revision=kwargs.pop("""revision""" , A_ ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            __a : Dict = np.load(A_ )
        return voice_preset_dict
    def lowerCAmelCase__(self , _lowercase = None ):
        """Validate that a voice-preset dict holds ndarrays of the expected rank
        for each of the three prompt keys."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__(self , _lowercase=None , _lowercase=None , _lowercase="pt" , _lowercase=256 , _lowercase=False , _lowercase=True , _lowercase=False , **_lowercase , ):
        """Tokenize text (padded to max_length) and attach an optional voice
        preset, resolved from the repo index or loaded from an .npz file."""
        if voice_preset is not None and not isinstance(A_ , A_ ):
            if (
                isinstance(A_ , A_ )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                __a : Union[str, Any] = self._load_voice_preset(A_ )
            else:
                if isinstance(A_ , A_ ) and not voice_preset.endswith(""".npz""" ):
                    __a : Any = voice_preset + ".npz"
                __a : Optional[int] = np.load(A_ )
        if voice_preset is not None:
            self._validate_voice_preset_dict(A_ , **A_ )
            __a : Optional[int] = BatchFeature(data=A_ , tensor_type=A_ )
        __a : Tuple = self.tokenizer(
            A_ , return_tensors=A_ , padding="""max_length""" , max_length=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , add_special_tokens=A_ , **A_ , )
        if voice_preset is not None:
            __a : Tuple = voice_preset
        return encoded_text
| 716 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments below rebind the same mangled name `lowercase__`,
# so the logger is clobbered by the pretrained-config URL map; originally these
# were two distinct module constants.
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration for a ViT-MSN model.

    Parameter names are pinned by the attributes the body assigns (hidden_size,
    num_hidden_layers, ...); defaults match the original signature's values.
    """
    _lowerCAmelCase = "vit_msn"

    def __init__(
        self ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-06 ,
        image_size=224 ,
        patch_size=16 ,
        num_channels=3 ,
        qkv_bias=True ,
        **kwargs ,
    ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 0 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( _lowerCamelCase : list ):
if not nums:
raise ValueError("""List is empty""" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

# Files every DPR tokenizer checkpoint is expected to contain.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Constant names below are pinned by the class attributes that reference them
# further down in this file; the obfuscated source rebound them all to one name.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast DPR context-encoder tokenizer: a BERT-style fast tokenizer configured
    with the context-encoder vocab / size / init tables declared above.

    NOTE(review): every attribute below rebinds the same mangled name
    `_lowerCAmelCase`, so only the last assignment survives; originally these were
    distinct class attributes (vocab_files_names, pretrained maps, slow class).
    """
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast DPR question-encoder tokenizer, parallel to the context-encoder class
    above but using the question-encoder tables.

    NOTE(review): the repeated `_lowerCAmelCase` bindings clobber one another;
    only the final assignment survives (see note on the sibling class).
    """
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRQuestionEncoderTokenizer
# Result records for reader span decoding. Names are pinned by the use of
# DPRSpanPrediction in decode_best_spans below.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
# NOTE(review): `__snake_case` is undefined here — this decorator originally applied
# the reader docstring constant defined just above.
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ :
    """Mixin adding DPR-reader behaviour to a BERT-style tokenizer: encoding of
    [CLS] question [SEP] title [SEP] text batches and best-span decoding.

    NOTE(review): the signatures below repeat the mangled name ``_lowercase``
    (duplicate arguments) while the bodies reference the original parameter names
    (titles, texts, questions, max_length, ...); the original names must be
    restored before this class can run.
    """
    def __call__(self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ):
        """Encode a question with passage titles/texts into a
        ``(n_passages, seq_len)`` ``input_ids`` matrix plus attention masks."""
        if titles is None and texts is None:
            return super().__call__(
                _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
        elif titles is None or texts is None:
            __a : str = titles if texts is None else texts
            return super().__call__(
                _lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
        __a : str = titles if not isinstance(_lowercase , _lowercase ) else [titles]
        __a : Optional[Any] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
        __a : Tuple = len(_lowercase )
        # one question may be broadcast across all passages
        __a : Dict = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
        assert len(_lowercase ) == len(
            _lowercase ), F'''There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.'''
        __a : Optional[Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
        __a : str = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
        __a : Union[str, Any] = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
            ]
        }
        if return_attention_mask is not False:
            __a : Optional[int] = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            __a : str = attention_mask
        return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase = 16 , _lowercase = 64 , _lowercase = 4 , ):
        """Decode reader logits into the best answer spans, iterating documents by
        decreasing relevance score."""
        __a : Union[str, Any] = reader_input["""input_ids"""]
        __a , __a , __a : Optional[int] = reader_output[:3]
        __a : int = len(_lowercase )
        __a : Any = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
        __a : List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            __a : Optional[int] = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            __a : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                __a : int = sequence_ids.index(self.pad_token_id )
            else:
                __a : Optional[Any] = len(_lowercase )
            __a : List[Any] = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(_lowercase ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , ):
        """Score all (start, end) pairs within max_answer_length and greedily keep
        the top non-overlapping spans."""
        __a : Tuple = []
        for start_index, start_score in enumerate(_lowercase ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        __a : str = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
        __a : Union[str, Any] = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
            __a : List[str] = end_index - start_index + 1
            assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
            # skip spans that overlap an already-chosen span
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(_lowercase ) == top_spans:
                break
        return chosen_span_intervals
# NOTE(review): `__snake_case` is undefined — this decorator originally appended
# the reader docstring constant.
@add_end_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case ):
    """Fast DPR reader tokenizer: the reader mixin above combined with a
    BERT-style fast tokenizer and the reader pretrained tables.

    NOTE(review): the repeated `_lowerCAmelCase` bindings clobber one another;
    only the final assignment survives.
    """
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    _lowerCAmelCase = DPRReaderTokenizer
| 63 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """CLAP-style audio feature extractor: converts raw mono audio into log-mel
    "input_features" plus a per-sample "is_longer" flag, supporting "fusion" /
    "rand_trunc" truncation and "repeat" / "repeatpad" padding.

    NOTE(review): the __init__/__call__ signatures repeat the mangled name
    ``_lowercase`` (duplicate arguments) and the bodies reference the original
    names (top_db, truncation, padding, ...) plus the mangled ``lowerCamelCase__``;
    the original parameter names must be restored before this class can run.
    """
    _lowerCAmelCase = ["input_features", "is_longer"]
    def __init__(self , _lowercase=64 , _lowercase=48000 , _lowercase=480 , _lowercase=10 , _lowercase=1024 , _lowercase=0.0 , _lowercase=False , _lowercase = 0 , _lowercase = 14000 , _lowercase = None , _lowercase = "fusion" , _lowercase = "repeatpad" , **_lowercase , ):
        """Store STFT/mel parameters and precompute two mel filter banks:
        HTK-scaled (used for "fusion") and Slaney-scaled (all other modes)."""
        super().__init__(
            feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
        __a : int = top_db
        __a : Any = truncation
        __a : Optional[Any] = padding
        __a : int = fft_window_size
        # number of frequency bins of the one-sided spectrum
        __a : Optional[int] = (fft_window_size >> 1) + 1
        __a : Optional[int] = hop_length
        __a : Union[str, Any] = max_length_s
        __a : Union[str, Any] = max_length_s * sampling_rate
        __a : List[str] = sampling_rate
        __a : Optional[int] = frequency_min
        __a : List[Any] = frequency_max
        __a : Union[str, Any] = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm=lowerCamelCase__ , mel_scale="""htk""" , )
        __a : Tuple = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm="""slaney""" , mel_scale="""slaney""" , )
    def lowerCAmelCase__(self ):
        """Serializable dict of this extractor, with the large mel filter banks
        removed."""
        __a : List[Any] = copy.deepcopy(self.__dict__ )
        __a : Optional[Any] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
        """Compute a dB-scaled log-mel spectrogram (time-major) of the waveform
        using the given mel filters."""
        __a : int = spectrogram(
            lowerCamelCase__ , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase__ , log_mel="""dB""" , )
        return log_mel_spectrogram.T
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Build the 4-channel "fusion" stack: a bilinearly shrunk global view plus
        three random chunks taken from the front/middle/back thirds."""
        __a : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            __a : int = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            __a : List[Any] = [0]
        # randomly choose index for each part
        __a : List[Any] = np.random.choice(ranges[0] )
        __a : List[str] = np.random.choice(ranges[1] )
        __a : List[str] = np.random.choice(ranges[2] )
        __a : Dict = mel[idx_front : idx_front + chunk_frames, :]
        __a : int = mel[idx_middle : idx_middle + chunk_frames, :]
        __a : int = mel[idx_back : idx_back + chunk_frames, :]
        __a : Dict = torch.tensor(mel[None, None, :] )
        __a : Any = torch.nn.functional.interpolate(
            lowerCamelCase__ , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=lowerCamelCase__ )
        __a : List[str] = mel_shrink[0][0].numpy()
        __a : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Truncate or pad one waveform to max_length and return its mel features
        plus whether the original audio exceeded max_length."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                __a : Optional[int] = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                __a : Optional[Any] = len(lowerCamelCase__ ) - max_length
                __a : List[Any] = np.random.randint(0 , overflow + 1 )
                __a : str = waveform[idx : idx + max_length]
                __a : Dict = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                __a : List[str] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
                __a : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
                __a : str = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    __a : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
                    __a : List[str] = False
                else:
                    __a : Optional[Any] = self._random_mel_fusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                    __a : Union[str, Any] = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            __a : Union[str, Any] = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    __a : Optional[int] = int(max_length / len(lowerCamelCase__ ) )
                    __a : int = np.stack(np.tile(lowerCamelCase__ , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    __a : Optional[Any] = int(max_length / len(lowerCamelCase__ ) )
                    __a : str = np.stack(np.tile(lowerCamelCase__ , lowerCamelCase__ ) )
                __a : Any = np.pad(lowerCamelCase__ , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
            if truncation == "fusion":
                __a : List[Any] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
                __a : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                __a : List[str] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__(self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature with
        "input_features" and "is_longer"; validates the sampling rate."""
        __a : Any = truncation if truncation is not None else self.truncation
        __a : Any = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        __a : Tuple = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        __a : int = is_batched_numpy or (
            isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __a : Tuple = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ):
            __a : int = np.asarray(lowerCamelCase__ , dtype=np.floataa )
        elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __a : str = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __a : int = [np.asarray(lowerCamelCase__ )]
        # convert to mel spectrogram, truncate and pad if needed.
        __a : List[str] = [
            self._get_input_mel(lowerCamelCase__ , max_length if max_length else self.nb_max_samples , lowerCamelCase__ , lowerCamelCase__ )
            for waveform in raw_speech
        ]
        __a : Optional[int] = []
        __a : Tuple = []
        for mel, longer in padded_inputs:
            input_mel.append(lowerCamelCase__ )
            is_longer.append(lowerCamelCase__ )
        if truncation == "fusion" and sum(lowerCamelCase__ ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            __a : List[Any] = np.random.randint(0 , len(lowerCamelCase__ ) )
            __a : List[str] = True
        if isinstance(input_mel[0] , lowerCamelCase__ ):
            __a : Dict = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        __a : Any = [[longer] for longer in is_longer]
        __a : str = {"input_features": input_mel, "is_longer": is_longer}
        __a : Dict = BatchFeature(lowerCamelCase__ )
        if return_tensors is not None:
            __a : Optional[int] = input_features.convert_to_tensors(lowerCamelCase__ )
        return input_features
| 718 |
"""simple docstring"""
import os
def largest_product(grid ):
    """Return the greatest product of four adjacent numbers in the square grid
    (vertically, horizontally, or along either diagonal) — Project Euler 11.

    The function name is pinned by the caller in ``solution`` below.
    """
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def __magic_name__ ( ):
    """Read the 20x20 grid from ``grid.txt`` next to this script and return the
    greatest product of four adjacent numbers in it.

    Fixes: the original passed an undefined name to ``os.path.dirname`` (should
    be ``__file__``) and converted cells with ``int(<undefined>)``.
    """
    grid = []
    # Resolve grid.txt relative to this source file, not the working directory.
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )
    grid = [[int(cell) for cell in row] for row in grid]
    # NOTE(review): `largest_product` is expected to be the grid-product helper
    # defined above (mangled to `__magic_name__` in this source) — confirm the name.
    return largest_product(grid)
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound in this module (both functions above
    # are named `__magic_name__` in this mangled source), so running this script
    # raises NameError — confirm the intended function name.
    print(solution())
| 63 | 0 |
"""simple docstring"""
import argparse
import os
import re
lowercase__ = "src/diffusers"
# Pattern that looks at the indentation in a line.
lowercase__ = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ = re.compile(R"\[([^\]]+)\]")
def __magic_name__ ( _lowerCamelCase : int ):
__a : List[str] = _re_indent.search(lowercase__ )
return "" if search is None else search.groups()[0]
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : str="" , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None ):
__a : Union[str, Any] = 0
__a : Optional[int] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(lowercase__ ):
index += 1
__a : Tuple = ["""\n""".join(lines[:index] )]
else:
__a : Dict = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__a : str = [lines[index]]
index += 1
while index < len(lowercase__ ) and (end_prompt is None or not lines[index].startswith(lowercase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(lowercase__ ) )
if index < len(lowercase__ ) - 1:
__a : List[Any] = [lines[index + 1]]
index += 1
else:
__a : Optional[Any] = []
else:
blocks.append("""\n""".join(lowercase__ ) )
__a : List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase__ ) > 0:
blocks.append("""\n""".join(lowercase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase__ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def __magic_name__ ( _lowerCamelCase : List[str] ):
def _inner(_lowerCamelCase : Tuple ):
return key(lowercase__ ).lower().replace("""_""" , """""" )
return _inner
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Any=None ):
def noop(_lowerCamelCase : Any ):
return x
if key is None:
__a : Union[str, Any] = noop
# Constants are all uppercase, they go first.
__a : Dict = [obj for obj in objects if key(lowercase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__a : Optional[Any] = [obj for obj in objects if key(lowercase__ )[0].isupper() and not key(lowercase__ ).isupper()]
# Functions begin with a lowercase, they go last.
__a : Dict = [obj for obj in objects if not key(lowercase__ )[0].isupper()]
__a : Tuple = ignore_underscore(lowercase__ )
return sorted(lowercase__ , key=lowercase__ ) + sorted(lowercase__ , key=lowercase__ ) + sorted(lowercase__ , key=lowercase__ )
def __magic_name__ ( _lowerCamelCase : Any ):
def _replace(_lowerCamelCase : Dict ):
__a : List[Any] = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
__a : List[Any] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a : str = keys[:-1]
return "[" + ", ".join([F'''\"{k}\"''' for k in sort_objects(lowercase__ )] ) + "]"
__a : List[str] = import_statement.split("""\n""" )
if len(lowercase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__a : Optional[Any] = 2 if lines[1].strip() == """[""" else 1
__a : Any = [(i, _re_strip_line.search(lowercase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__a : Optional[Any] = sort_objects(lowercase__ , key=lambda _lowerCamelCase : x[1] )
__a : Union[str, Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__a : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__a : List[str] = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a : int = keys[:-1]
__a : Any = get_indent(lines[1] ) + """, """.join([F'''\"{k}\"''' for k in sort_objects(lowercase__ )] )
return "\n".join(lowercase__ )
else:
# Finally we have to deal with imports fitting on one line
__a : int = _re_bracket_content.sub(_replace , lowercase__ )
return import_statement
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : List[Any]=True ):
    """Sort the `_import_structure` entries of one __init__.py, rewriting the
    file (or, with check-only enabled, just reporting whether it would change).

    NOTE(review): this def repeats the parameter name `_lowerCamelCase` (a
    SyntaxError in Python) and the body reads many names that are never bound
    here (`code`, `main_blocks`, `block_lines`, `line_idx`, `keys`,
    `sorted_indices`, `check_only`, `file`, ...), while `lowercase__` is used
    in place of most arguments. The source appears machine-mangled; distinct
    parameters (presumably `file` and `check_only=True`) must be restored
    before this can run.
    """
    with open(lowercase__ , """r""" ) as f:
        __a : Union[str, Any] = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    __a : Dict = split_code_in_indented_blocks(
        lowercase__ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(lowercase__ ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        __a : List[str] = main_blocks[block_idx]
        __a : Tuple = block.split("""\n""" )
        # Get to the start of the imports.
        __a : Dict = 0
        while line_idx < len(lowercase__ ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                __a : Optional[int] = len(lowercase__ )
            else:
                line_idx += 1
        if line_idx >= len(lowercase__ ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        __a : Tuple = """\n""".join(block_lines[line_idx:-1] )
        __a : Optional[Any] = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        __a : int = split_code_in_indented_blocks(lowercase__ , indent_level=lowercase__ )
        # We have two categories of import key: list or _import_structure[key].append/extend
        __a : Optional[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        __a : int = [(pattern.search(lowercase__ ).groups()[0] if pattern.search(lowercase__ ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        __a : List[str] = [(i, key) for i, key in enumerate(lowercase__ ) if key is not None]
        # NOTE(review): the lambda below takes `_lowerCamelCase` but reads `x` —
        # sorting would raise NameError as written.
        __a : Optional[Any] = [x[0] for x in sorted(lowercase__ , key=lambda _lowerCamelCase : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        __a : str = 0
        __a : Any = []
        for i in range(len(lowercase__ ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                __a : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(lowercase__ )
                count += 1
        # And we put our main block back together with its first and last line.
        __a : List[Any] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(lowercase__ ):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''' )
            with open(lowercase__ , """w""" ) as f:
                f.write("""\n""".join(lowercase__ ) )
def __magic_name__ ( _lowerCamelCase : List[str]=True ):
    """Run the per-file sorter on every __init__.py under the source tree and
    raise if any file would be rewritten.

    NOTE(review): `os.walk(lowercase__)` walks the last value bound to
    `lowercase__` (a compiled regex) instead of the source root, and
    `sort_imports` / `result` / `failures` are not bound under those names in
    this mangled module — confirm the intended names before use.
    """
    __a : Optional[int] = []
    for root, _, files in os.walk(lowercase__ ):
        if "__init__.py" in files:
            __a : Optional[int] = sort_imports(os.path.join(lowercase__ , """__init__.py""" ) , check_only=lowercase__ )
            if result:
                __a : Union[str, Any] = [os.path.join(lowercase__ , """__init__.py""" )]
    if len(lowercase__ ) > 0:
        raise ValueError(F'''Would overwrite {len(lowercase__ )} files, run `make style`.''' )
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args are bound to `lowercase__`, yet
    # the next lines read `parser` / `args`, and `sort_imports_in_all_inits`
    # is not defined under that name here — running this raises NameError.
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    lowercase__ = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 719 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Output class for the semantic Stable Diffusion pipeline.

    NOTE(review): both fields were mangled to the same name `_lowerCAmelCase`
    with plain integer values, so the second assignment overwrites the first
    and — lacking type annotations — neither becomes a dataclass field.
    Presumably these were annotated `images` / `nsfw_content_detected`
    attributes; confirm against the upstream pipeline output definition.
    """
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 0 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int = 4_0_0_0_0_0_0 ):
__a : List[Any] = [0, 1]
__a : Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
__a : Any = 0
for j in range(len(UpperCamelCase__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound in this module (the function above
    # is named `__magic_name__` in this mangled source) — running this script
    # raises NameError; confirm the intended function name.
    print(f'{solution() = }')
| 720 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Pipeline tests for the `text-classification` task.

    NOTE(review): the two mapping attributes below were both mangled to
    `_lowerCAmelCase` (the second assignment overwrites the first), while the
    class-body guards read `model_mapping` / `tf_model_mapping` — those names
    are unbound here; confirm the intended attribute names.
    """
    _lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    _lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        _lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        _lowerCAmelCase = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def lowerCAmelCase__(self ):
        '''Check top_k handling and the legacy return_all_scores flag on a tiny PyTorch model.'''
        __a : int = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
        __a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : List[str] = text_classifier("""This is great !""" , top_k=1 )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        # Legacy behavior
        __a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
        __a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ] , )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Pipeline construction with an explicit torch.device("cpu") should still work.'''
        import torch
        __a : Any = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
        __a : Optional[int] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
    @require_tf
    def lowerCAmelCase__(self ):
        '''Smoke test of the TensorFlow framework path on a tiny model.'''
        __a : List[Any] = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
        __a : List[str] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
    @slow
    @require_torch
    def lowerCAmelCase__(self ):
        '''Slow test: the default PyTorch sentiment model on positive/negative/neutral inputs.'''
        __a : Tuple = pipeline("""text-classification""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Optional[int] = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    @slow
    @require_tf
    def lowerCAmelCase__(self ):
        '''Slow test: the default TensorFlow sentiment model on the same inputs.'''
        __a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
        __a : str = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Tuple = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : str = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Build a TextClassificationPipeline plus example inputs for the shared pipeline-test mixin.'''
        __a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''Exercise single/batch inputs, top_k=None, text pairs, and the disabled list-pair usage.'''
        __a : List[str] = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        __a : Union[str, Any] = """HuggingFace is in"""
        __a : List[str] = text_classifier(_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        __a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
        __a : Dict = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        __a : Dict = text_classifier(_lowercase , top_k=_lowercase )
        __a : Dict = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
        __a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        __a : Any = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
        self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        __a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(_lowercase ):
            text_classifier(_lowercase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        __a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowercase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowercase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __magic_name__ ( _lowerCamelCase : str ):
if "://" in dataset_path:
__a : int = dataset_path.split("""://""" )[1]
return dataset_path
def __magic_name__ ( _lowerCamelCase : fsspec.AbstractFileSystem ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __magic_name__ ( _lowerCamelCase : "fsspec.AbstractFileSystem" , src : str , dst : str ):
    """Move *src* to *dst* on filesystem *fs*.

    Fixes: the original def repeated the parameter name (a SyntaxError) and
    passed the undefined `_UpperCamelCase` everywhere; distinct parameters are
    restored. The remoteness check is inlined because the module helper's name
    is mangled away.
    """
    fs = _lowerCamelCase
    is_local = fs is None or fs.protocol == "file"
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        # presumably a recursive move is intended for remote directories — TODO confirm
        fs.mv(src, dst, recursive=True)
def __magic_name__ ( ):
    """Clear fsspec's global asyncio state (lock, loop, io thread), e.g. after a fork.

    The mangled original assigned the resets to a throwaway local, so nothing
    ever reached fsspec; the module-attribute assignments are restored here
    (matching fsspec's own internal names — confirm against the pinned fsspec
    version).
    """
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 721 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = 0
__a : Optional[Any] = [0]
__a : int = [0]
__a : str = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
__a : int = [60]
__a : Union[str, Any] = [10]
__a : Tuple = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = 3
__a : str = [1, 2, 3]
__a : Optional[Any] = [3, 2, 1]
__a : int = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = 50
__a : Tuple = [60, 100, 120]
__a : List[str] = [10, 20, 30]
__a : Union[str, Any] = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 220 )
if __name__ == "__main__":
unittest.main()
| 63 | 0 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 700 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Manim scene visualising how model checkpoint weights flow between
    CPU, GPU, loaded-checkpoint and disk memory blocks.

    NOTE(review): locals are mangled to `__a`, yet later lines read
    descriptive names (`cpu`, `gpu`, `model`, `cpu_left_col_base`,
    `model_cpu_arr`, `disk_left_col_base`, ...) that are never bound, and
    `VGroup(*_lowercase)` / `arrange(_lowercase, ...)` reference an unbound
    name throughout — the scene cannot run as written.
    """
    def lowerCAmelCase__(self ):
        '''Build the CPU/GPU/Model/Checkpoint/Disk layout and animate the checkpoint moving to disk.'''
        # Basic building-block rectangles reused for every memory cell.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six cells.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU: a single row of four cells.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model: six cells representing the (empty) model weights.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Fill targets mirroring the model cells onto the CPU columns.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # Loaded checkpoint: six cells above the model.
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend explaining the colour coding.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Step 1: narrate and build the disk target, then animate the move.
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Step 2: narrate garbage collection and fade everything out.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def __magic_name__ ( _lowerCamelCase : Dict = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__a : Tuple = nums[0]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
__a : int = nums[i]
__a : Optional[int] = max(_SCREAMING_SNAKE_CASE , ans + num , _SCREAMING_SNAKE_CASE )
return ans
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Try on a sample input from the user
    # NOTE(review): the parsed count and number list are bound to `lowercase__`,
    # yet the slice reads `n` and the call reads `array` / `max_subsequence_sum`
    # — all unbound here, so this interactive path raises NameError as written.
    lowercase__ = int(input("Enter number of elements : ").strip())
    lowercase__ = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
| 701 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float(moles / volume ) * nfactor )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
import os
from distutils.util import strtobool
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Optional[Any] ):
for e in env_keys:
__a : Optional[Any] = int(os.environ.get(lowercase__ , -1 ) )
if val >= 0:
return val
return default
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=False ):
__a : Optional[Any] = os.environ.get(lowercase__ , str(lowercase__ ) )
return strtobool(lowercase__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def __magic_name__ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple="no" ):
__a : str = os.environ.get(lowercase__ , str(lowercase__ ) )
return value
| 702 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 703 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : float ):
# For applying gaussian function for each element in matrix.
__a : int = math.sqrt(_lowerCamelCase )
__a : Any = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
__a : Any = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : float ):
# Creates a gaussian kernel of given dimension.
__a : int = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _lowerCamelCase ):
for j in range(0 , _lowerCamelCase ):
__a : Any = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : int , ):
    """Apply a bilateral filter to a grayscale image: each pixel becomes a
    weighted average of its window, weighted by spatial distance and by
    intensity difference.

    NOTE(review): this def repeats the parameter name four times (a
    SyntaxError), the annotated tuple unpack on `img.shape` is invalid syntax,
    and the body reads names that are never bound (`img`, `img_s`,
    `kernel_size`, `size_x`, `size_y`, `val`, `imga`) while the helper calls
    pass the unbound `_lowerCamelCase`. Distinct parameters (img,
    spatial_variance, intensity_variance, kernel_size) must be restored.
    """
    __a : Tuple = np.zeros(img.shape )
    __a : Optional[int] = get_gauss_kernel(_lowerCamelCase , _lowerCamelCase )
    __a , __a : int = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            # Window around (i, j), centred intensity differences, then the
            # product of spatial and intensity gaussian weights.
            __a : List[str] = get_slice(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
            __a : Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
            __a : Optional[Any] = vec_gaussian(_lowerCamelCase , _lowerCamelCase )
            __a : Optional[Any] = np.multiply(_lowerCamelCase , _lowerCamelCase )
            __a : Any = np.multiply(_lowerCamelCase , _lowerCamelCase )
            __a : Tuple = np.sum(_lowerCamelCase ) / np.sum(_lowerCamelCase )
            __a : Optional[Any] = val
    return imga
def parse_args(args: list) -> tuple:
    """Parse CLI arguments, falling back to defaults for any that are missing.

    Expected order: program name, filename, spatial variance,
    intensity variance, kernel size.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Force the kernel size to be odd (even sizes are bumped up by one).
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # Parse CLI args, read the image as grayscale, and show the input.
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    # Normalise to [0, 1] floats before filtering.
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)

    # Scale back to 8-bit for display (was np.uinta, which does not exist).
    out = out * 255
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    """Builds a tiny BlenderbotSmall config plus dummy inputs for fast TF unit tests."""

    # Knobs read by prepare_config_and_inputs_for_common().
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        # `parent` is the unittest.TestCase driving the assertions.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small config and a matching encoder/decoder input dict."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Force every sequence to end with the EOS token.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached (past_key_values) decoding matches a full forward pass."""
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full kwargs dict for a BlenderbotSmall forward pass.

    Any mask not supplied is derived: attention masks hide padding tokens,
    head masks default to all-ones (no heads masked).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Never mask the first decoder position; mask padding elsewhere.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF BlenderbotSmall model family."""

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    """Slow integration test: beam-search generation with the 90M checkpoint."""

    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # The tokenizer is loaded from the older repo name; content matches the 90M model.
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        # NOTE(review): `TFAutoModelForSeqaSeqLM` matches this file's import, but looks
        # like a mangled `TFAutoModelForSeq2SeqLM` — confirm against transformers.
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 704 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Return a tiny three-row dataset for deduplication tests.

    The first two rows share near-identical content ("a " repeated), so they
    should land in the same duplicate cluster; the third is distinct.
    """
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    """Unit tests for minhash-based near-duplicate detection."""

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        # The two "a "-based files are near-duplicates -> one cluster of two.
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 63 | 0 |
"""simple docstring"""
class Node:
    """A node of a doubly linked list holding ``data`` and its two neighbour links."""

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """Forward iterator over nodes exposing ``get_data()``/``get_next()``.

    The mangled source named the step method arbitrarily; it must be
    ``__next__`` for the iterator protocol to work.
    """

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    """A doubly linked list that tracks both head and tail nodes."""

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        """Space-separated data of every node, head to tail."""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Data of the first node, or None when the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Data of the last node, or None when the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        """Make ``node`` the new head (also the tail when the list was empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        """Make ``node`` the new tail."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        """Append ``value`` at the tail (or create the first node)."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        """Link ``node_to_insert`` immediately before ``node``."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        """Link ``node_to_insert`` immediately after ``node``."""
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value):
        """Insert ``value`` at 1-based ``position``; append when the position exceeds the length."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        """Return the first node whose data equals ``item``; raise when absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        """Unlink the first node holding ``value``, fixing head/tail as needed."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        """Detach ``node`` from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def __magic_name__ ( ):
    """Do nothing.

    NOTE(review): empty placeholder left by the original author; its intended
    purpose is not evident from this file.
    """
    pass
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import scaffold for the Ernie subpackage: the structure dict maps
# submodule name -> public symbols, and _LazyModule defers the real imports.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: register the modeling symbols for lazy loading.
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
return numa ^ numa < 0
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 706 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "linear"
_lowerCAmelCase = "cosine"
_lowerCAmelCase = "cosine_with_restarts"
_lowerCAmelCase = "polynomial"
_lowerCAmelCase = "constant"
_lowerCAmelCase = "constant_with_warmup"
_lowerCAmelCase = "piecewise_constant"
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ):
__a : Optional[int] = {}
__a : Any = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__a , __a : int = rule_str.split(""":""" )
__a : Optional[int] = int(_lowerCamelCase )
__a : str = float(_lowerCamelCase )
__a : int = value
__a : Dict = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : str , _lowerCamelCase : Tuple ):
def rule_func(_lowerCamelCase : int ) -> float:
__a : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__a : Optional[int] = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str=-1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Any ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=1E-7 , _lowerCamelCase : Optional[int]=1.0 , _lowerCamelCase : Optional[int]=-1 ):
__a : Union[str, Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__a : Tuple = lr_init - lr_end
__a : int = num_training_steps - num_warmup_steps
__a : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
__a : List[str] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ):
__a : int = SchedulerType(_lowerCamelCase )
__a : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
| 63 | 0 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger for this example script.
lowercase__ = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

# Ensure the installed `datasets` package is new enough for the audio features used below.
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav, max_length: float, sample_rate: int = 16000):
    """Randomly sample a chunk of ``max_length`` seconds from the input audio.

    Args:
        wav: 1-D audio samples (presumably a numpy array — sliceable sequence).
        max_length: maximum clip length in seconds.
        sample_rate: samples per second used to convert seconds to a sample count.

    Returns:
        ``wav`` unchanged when it already fits, otherwise a random contiguous
        slice of exactly ``sample_rate * max_length`` samples.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/feature extractor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # `freeze_feature_extractor` is the deprecated spelling of
        # `freeze_feature_encoder`; warn or reject contradictory combinations.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def __magic_name__ ( ):
__a : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__a : int = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
__a : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
__a : Dict = DatasetDict()
__a : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
__a : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__a : str = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__a : int = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__a : Optional[int] = feature_extractor.model_input_names[0]
def train_transforms(_lowerCamelCase : Dict ):
__a : Dict = []
for audio in batch[data_args.audio_column_name]:
__a : Dict = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
__a : List[Any] = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
__a : Optional[int] = {model_input_name: inputs.get(lowercase_ )}
__a : Tuple = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowerCamelCase : Optional[Any] ):
__a : Optional[Any] = [audio["array"] for audio in batch[data_args.audio_column_name]]
__a : Optional[int] = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
__a : List[Any] = {model_input_name: inputs.get(lowercase_ )}
__a : Union[str, Any] = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__a : Union[str, Any] = raw_datasets["train"].features[data_args.label_column_name].names
__a : int = {}, {}
for i, label in enumerate(lowercase_ ):
__a : int = str(lowercase_ )
__a : str = label
# Load the accuracy metric from the datasets package
__a : List[Any] = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase : Tuple ):
__a : Any = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
__a : List[str] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a : Optional[Any] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__a : Union[str, Any] = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__a : int = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
__a : Optional[Any] = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
__a : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
__a : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a : Tuple = last_checkpoint
__a : List[str] = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__a : List[str] = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
__a : Dict = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
# Standard script entry point: run the audio-classification training pipeline.
if __name__ == "__main__":
    main()
| 707 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__ ( config_path , display=False ):
    """Load an OmegaConf YAML configuration file.

    Args:
        config_path: Path to the YAML config file.
        display: When True, pretty-print the resolved config to stdout.

    Returns:
        The loaded ``omegaconf`` configuration object.
    """
    # NOTE(review): the original declared both parameters as `_lowerCamelCase`
    # (duplicate argument names are a SyntaxError) and referenced an undefined
    # `display`; unique, meaningful parameter names restored.
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def __magic_name__ ( device , conf_path=None , ckpt_path=None ):
    """Instantiate a taming-transformers ``VQModel`` and load its weights.

    Args:
        device: Torch device (or map_location) the weights are loaded onto.
        conf_path: YAML config path; defaults to the bundled VQGAN-only config.
        ckpt_path: Checkpoint path; ``.ckpt`` files are assumed to be full
            Lightning checkpoints wrapping a ``state_dict``.

    Returns:
        The loaded ``VQModel`` moved to ``device``.
    """
    # NOTE(review): original signature used three identical `_lowerCamelCase`
    # parameter names (SyntaxError); names restored from the body's usage —
    # confirm the positional order against the original call sites.
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Full Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    # Non-strict load: full Stable-Diffusion checkpoints carry keys the
    # stand-alone VQGAN does not — presumably tolerable here; TODO confirm.
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd  # release the (potentially large) raw state dict
    return model
def __magic_name__ ( x , model ):
    """Round-trip `x` through the VQGAN: encode to the latent grid, decode back.

    Args:
        x: Input image batch tensor.
        model: A VQGAN model exposing ``encode``/``decode``.

    Returns:
        The reconstructed image batch.
    """
    # NOTE(review): original declared both parameters as `_lowerCamelCase`
    # (SyntaxError); names restored. `encode` returns (z, emb_loss, info).
    z, _, _ = model.encode(x)
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''')
    xrec = model.decode(z)
    return xrec
def __magic_name__ ( string , reload=False ):
    """Resolve a dotted path like ``"pkg.mod.Name"`` to the named object.

    Args:
        string: Fully-qualified dotted path; everything before the last dot is
            the module, the final component the attribute to fetch.
        reload: When True, re-import the module before resolving the attribute.

    Returns:
        The object named by ``string``.
    """
    # NOTE(review): original had duplicate `_lowerCamelCase` parameters
    # (SyntaxError), referenced an undefined `string`, reloaded the wrong
    # object, and passed the full dotted path as `package=`; all fixed.
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def __magic_name__ ( config ):
    """Instantiate the object described by a config with a ``target`` key.

    Args:
        config: Mapping with a mandatory ``target`` (dotted import path) and an
            optional ``params`` dict of constructor keyword arguments.

    Returns:
        The constructed object.

    Raises:
        KeyError: If ``target`` is missing from ``config``.
    """
    # NOTE(review): original body referenced an undefined `config`; the
    # parameter name is restored to match.
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""")
    return get_obj_from_str(config["""target"""])(**config.get("""params""", {}))
def __magic_name__ ( config , sd , gpu=True , eval_mode=True ):
    """Instantiate a model from `config` and optionally load weights.

    Args:
        config: Config mapping understood by ``instantiate_from_config``.
        sd: State dict to load, or None to keep freshly initialized weights.
        gpu: When True, move the model to CUDA.
        eval_mode: When True, put the model into eval mode.

    Returns:
        ``{"model": model}`` — a dict wrapping the prepared model.
    """
    # NOTE(review): original declared four identical `_lowerCamelCase`
    # parameters (SyntaxError); names restored from the body's usage.
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def __magic_name__ ( config , ckpt , gpu , eval_mode ):
    """Load a model (optionally from a Lightning checkpoint) for evaluation.

    Args:
        config: Config object whose ``model`` sub-config describes the model.
        ckpt: Checkpoint path, or a falsy value to skip loading weights.
        gpu: Forwarded to ``load_model_from_config``.
        eval_mode: Forwarded to ``load_model_from_config``.

    Returns:
        Tuple ``(model, global_step)``; ``global_step`` is None when no
        checkpoint was given.
    """
    # NOTE(review): original declared four identical `_lowerCamelCase`
    # parameters (SyntaxError); names restored from the body's usage.
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="""cpu""")
        global_step = pl_sd["""global_step"""]
        print(F'''loaded model from global step {global_step}.''')
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["""state_dict"""], gpu=gpu, eval_mode=eval_mode)["""model"""]
    return model, global_step
| 63 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __magic_name__ ( checkpoint , config ):
    """Convert an LDM VAE state dict into diffusers ``AutoencoderKL`` layout.

    Args:
        checkpoint: The original VAE state dict (LDM key naming).
        config: The diffusers VAE config (from ``create_vae_diffusers_config``).

    Returns:
        A new state dict with keys renamed/remapped for ``AutoencoderKL``.
    """
    # NOTE(review): the original collapsed every `new_checkpoint[...]`
    # assignment into a throwaway local, returning an empty dict, and declared
    # duplicate parameters (SyntaxError); the key mapping is restored below.
    vae_state_dict = checkpoint
    new_checkpoint = {}
    # Stem / head convolutions and norms, encoder then decoder.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(num_up_blocks)
    }
    # Encoder down blocks: resnets plus optional downsampler per block.
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
        if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
            new_checkpoint[F'''encoder.down_blocks.{i}.downsamplers.0.conv.weight'''] = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.weight''')
            new_checkpoint[F'''encoder.down_blocks.{i}.downsamplers.0.conv.bias'''] = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.bias''')
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'''down.{i}.block''', "new": F'''down_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    # Encoder mid block: two resnets and one attention layer.
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    # Decoder up blocks (reverse order) plus optional upsampler per block.
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
        ]
        if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
            new_checkpoint[F'''decoder.up_blocks.{i}.upsamplers.0.conv.weight'''] = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.weight'''
            ]
            new_checkpoint[F'''decoder.up_blocks.{i}.upsamplers.0.conv.bias'''] = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.bias'''
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'''up.{block_id}.block''', "new": F'''up_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    # Decoder mid block: two resnets and one attention layer.
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def __magic_name__ ( checkpoint_path , dump_path , ):
    """Convert a stand-alone VAE ``.pt``/``.safetensors`` file to diffusers format.

    Args:
        checkpoint_path: Path to the source VAE checkpoint.
        dump_path: Directory where the converted ``AutoencoderKL`` is saved.
    """
    # NOTE(review): original declared duplicate `_lowerCamelCase` parameters
    # (SyntaxError) and referenced undefined locals; names restored.
    # Only support V1: fetch the reference SD v1 inference config.
    r = requests.get(
        """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""")
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 5_1_2
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if checkpoint_path.endswith("""safetensors"""):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="""pt""", device="""cpu""") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["""state_dict"""]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(dump_path)
if __name__ == "__main__":
    # CLI: convert a VAE checkpoint to diffusers format.
    # NOTE(review): original assigned the parser/args to a throwaway name and
    # then referenced undefined `parser`/`args`; the --dump_path help text was
    # also a copy-paste of --vae_pt_path's.
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted VAE checkpoint.")
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
"""simple docstring"""
import math
class SCREAMING_SNAKE_CASE__ :
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : List[Any] = 0.0
__a : Dict = 0.0
for i in range(len(_lowerCAmelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
for i in range(len(_lowerCAmelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __magic_name__ ( ):
__a : Any = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
__a : str = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
__a : Any = SelfOrganizingMap()
__a : List[Any] = 3
__a : str = 0.5
for _ in range(_lowerCamelCase ):
for j in range(len(_lowerCamelCase ) ):
# training sample
__a : Optional[Any] = training_samples[j]
# Compute the winning vector
__a : Optional[Any] = self_organizing_map.get_winner(_lowerCamelCase , _lowerCamelCase )
# Update the winning vector
__a : Optional[Any] = self_organizing_map.update(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# classify test sample
__a : Any = [0, 0, 0, 1]
__a : Union[str, Any] = self_organizing_map.get_winner(_lowerCamelCase , _lowerCamelCase )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 709 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration class for UniSpeech models.

    Covers the transformer encoder, the convolutional feature extractor,
    SpecAugment masking, the codevector quantizer and the CTC head.

    NOTE(review): the original ``__init__`` declared every parameter as
    ``_lowercase`` — duplicate argument names are a SyntaxError. The parameter
    names below are restored from the attribute assignments; confirm against
    the upstream ``UniSpeechConfig``.
    """

    _lowerCAmelCase = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        """Store the hyper-parameters and validate the conv layer definitions."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        # The three conv definitions must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self):
        """Product of the conv strides: input frames per output logit frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 63 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
    """Configuration class for Swin Transformer V2 models.

    NOTE(review): the original ``__init__`` declared every parameter as
    ``_lowercase`` — duplicate argument names are a SyntaxError — and the body
    referenced an undefined ``_a``. Parameter names are restored from the
    attribute assignments; confirm against the upstream ``Swinv2Config``.
    """

    _lowerCAmelCase = "swinv2"
    # NOTE(review): this second assignment overwrites the one above (both were
    # obfuscated to the same name); upstream these are two distinct class
    # attributes (presumably `model_type` and `attribute_map`) — confirm.
    _lowerCAmelCase = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        """Store the model hyper-parameters and derive the final hidden size."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 710 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds tiny EfficientFormer configs and dummy inputs for the TF tests.

    NOTE(review): ``__init__`` (and the two check methods) declare every
    parameter as ``_lowercase`` — duplicate argument names are a SyntaxError —
    and the bodies reference names that are no longer defined. The original
    descriptive parameter names must be restored before this class can run;
    the intended names can be read off the attribute assignments below.
    """
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Store every test hyper-parameter on the instance."""
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        # seq_length = embed_dim + 1 (the extra position presumably accounts
        # for a pooled/cls position — TODO confirm against the TF model).
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        """Create a config plus random pixel values (and labels when enabled)."""
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        """Build a minimal EfficientFormerConfig from the stored settings."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    # NOTE(review): duplicate `_lowercase` parameters below are a SyntaxError;
    # originally (config, pixel_values, labels).
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the bare model and check the last hidden state shape."""
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # NOTE(review): duplicate `_lowercase` parameters below are a SyntaxError;
    # originally (config, pixel_values, labels).
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the classification head (RGB and greyscale) and check logits shape."""
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) for the common mixin tests."""
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
    """TF EfficientFormer model/pipeline test case (mixin-driven).

    NOTE(review): every test method in this class is named ``lowerCAmelCase__``
    — later definitions shadow earlier ones, so unittest would only discover
    one of them; the original descriptive test names need restoring.  The
    class attributes below also all share one name, each overwriting the last
    (upstream these are `all_model_classes`, `pipeline_model_mapping` and the
    mixin feature flags).
    """
    # Model classes exercised by the shared mixin tests.
    _lowerCAmelCase = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline mixin.
    _lowerCAmelCase = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # Mixin feature flags (unsupported features are all disabled).
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        """Create the shared model tester and config tester fixtures."""
        # NOTE(review): `TFEfficientFormerModelTester` and `_lowercase` are not
        # defined in this module as written — presumably the tester class above
        # and `EfficientFormerConfig` were meant; confirm against upstream.
        __a : Tuple = TFEfficientFormerModelTester(self )
        __a : Any = ConfigTester(
            self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
    def lowerCAmelCase__(self ):
        """Run the shared PretrainedConfig sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def lowerCAmelCase__(self ):
        """Skipped: the model consumes pixel values, not input embeddings."""
        pass
    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def lowerCAmelCase__(self ):
        """Skipped: there are no text input/output embedding layers to test."""
        pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.asseretIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
    """Smoke-test loading the first pretrained EfficientFormer checkpoint."""
    # Fix: both the loop variable and the loaded model were previously
    # referenced through the undefined ``_lowercase`` placeholder.
    for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = TFEfficientFormerModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
def lowerCAmelCase__(self ):
    '''simple docstring'''
    # Verifies attention outputs for every model class: the number of
    # attention tensors and their per-head shapes (4-D when the model chunks
    # the sequence, 3-D otherwise), both via the forward kwarg and via the
    # config flag.
    # NOTE(review): the ``__a`` bindings below never define the names read
    # later (``seq_length``, ``chunk_length``, ``model``, ``outputs``,
    # ``attentions`` ...); as written this method raises NameError -- the
    # original identifiers need restoring before it can run.
    __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
    __a : int = True
    __a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
    __a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
    __a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
    __a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
    if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
        # Reformer-style models repeat the sequence once per hash round.
        __a : List[str] = encoder_seq_length * self.model_tester.num_hashes
    for model_class in self.all_model_classes:
        __a : List[Any] = True
        __a : Tuple = False
        __a : List[Any] = True
        __a : int = model_class(_lowercase )
        __a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
        __a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
        # check that output_attentions also work using config
        del inputs_dict["output_attentions"]
        __a : Optional[Any] = True
        __a : List[str] = model_class(_lowercase )
        __a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
        __a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
        if chunk_length is not None:
            self.assertListEqual(
                list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
        else:
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
    '''simple docstring'''
    # Builds each model against fully-flexible symbolic Keras inputs (None
    # dimensions) to catch conditionals that fail for dynamic shapes.
    # NOTE(review): the ``__a`` bindings never define ``model``/``outputs_dict``
    # referenced below -- NameError as written; restore the original names.
    __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        # Prepare our model
        __a : Dict = model_class(_lowercase )
        # These are maximally general inputs for the model, with multiple None dimensions
        # Hopefully this will catch any conditionals that fail for flexible shapes
        __a : Optional[Any] = {
            key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
            for key, val in model.input_signature.items()
            if key in model.dummy_inputs
        }
        __a : Optional[Any] = model(_lowercase )
        self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
    """Load the standard COCO cats fixture image used by the integration tests."""
    # Fix: the opened image was bound to an unused name while the undefined
    # name ``image`` was returned (NameError).
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests for TF EfficientFormer image classification.

    NOTE(review): the three members below all carry the obfuscated name
    ``lowerCAmelCase__``, so later definitions shadow earlier ones, and the
    ``__a`` bindings never define the names (``model``, ``image_processor``,
    ``outputs`` ...) used afterwards -- restore real identifiers before
    running.
    """
    @cached_property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Image processor matching the l1-300 checkpoint; None without vision deps.
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # End-to-end logits check for the plain classification head.
        __a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __a : Optional[Any] = self.default_image_processor
        __a : List[str] = prepare_img()
        __a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : Optional[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : str = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Same check for the distilled ("WithTeacher") classification head.
        __a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __a : Any = self.default_image_processor
        __a : str = prepare_img()
        __a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : List[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : int = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def __magic_name__ ( ):
__a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
__a : Dict = parser.parse_args()
return args.f
def __magic_name__ ( _lowerCamelCase : Any ):
__a : Tuple = {}
__a : Optional[int] = os.path.join(__UpperCamelCase , """all_results.json""" )
if os.path.exists(__UpperCamelCase ):
with open(__UpperCamelCase , """r""" ) as f:
__a : Optional[int] = json.load(__UpperCamelCase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def __magic_name__ ( ):
    """True when running on CUDA and NVIDIA apex is importable (enables --fp16)."""
    # Fix: the flag was bound to an unused name and read back through the
    # undefined ``is_using_cuda`` (NameError).
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
# Mirror all test logging to stdout so failing example runs show their output.
# Fix: the handler was bound to the unused name ``lowercase__`` while the
# undefined name ``stream_handler`` was registered with the logger.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
    """End-to-end runs of the PyTorch example scripts via ``accelerate launch``.

    NOTE(review): throughout this class the obfuscated ``__a`` bindings never
    define the names read later (``tmp_dir``, ``testargs``, ``result``,
    ``epochs`` ...), and results are checked through the undefined placeholder
    ``UpperCamelCase__``; every test method therefore raises NameError as
    written and the real identifiers need restoring.
    """
    @classmethod
    def lowerCAmelCase__(cls ):
        '''simple docstring'''
        # Create one shared accelerate config used to launch every example.
        __a : Any = tempfile.mkdtemp()
        __a : List[Any] = os.path.join(cls.tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        __a : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
    @classmethod
    def lowerCAmelCase__(cls ):
        '''simple docstring'''
        # Drop the shared temp dir created in class set-up.
        shutil.rmtree(cls.tmpdir )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_glue_no_trainer on the MRPC fixtures; checks accuracy + checkpoints.
        __a : Dict = self.get_auto_remove_tmp_dir()
        __a : Optional[int] = F'''\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
        if is_cuda_and_apex_available():
            testargs.append("""--fp16""" )
        run_command(self._launch_args + testargs )
        __a : Tuple = get_results(UpperCamelCase__ )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """glue_no_trainer""" ) ) )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_clm_no_trainer; bails out on multi-GPU (too few batches).
        __a : List[Any] = self.get_auto_remove_tmp_dir()
        __a : int = F'''\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs )
        __a : Optional[Any] = get_results(UpperCamelCase__ )
        self.assertLess(result["""perplexity"""] , 100 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """clm_no_trainer""" ) ) )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_mlm_no_trainer; checks a perplexity upper bound.
        __a : Tuple = self.get_auto_remove_tmp_dir()
        __a : str = F'''\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
        run_command(self._launch_args + testargs )
        __a : Optional[int] = get_results(UpperCamelCase__ )
        self.assertLess(result["""perplexity"""] , 42 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """mlm_no_trainer""" ) ) )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_ner_no_trainer; epoch count depends on visible GPU count.
        __a : Optional[int] = 7 if get_gpu_count() > 1 else 2
        __a : Dict = self.get_auto_remove_tmp_dir()
        __a : Dict = F'''\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
        run_command(self._launch_args + testargs )
        __a : str = get_results(UpperCamelCase__ )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
        self.assertLess(result["""train_loss"""] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """ner_no_trainer""" ) ) )
    @unittest.skip(reason="""Fix me @muellerzr""" )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_qa_no_trainer on SQuAD v2 fixtures (currently skipped upstream).
        __a : List[Any] = self.get_auto_remove_tmp_dir()
        __a : Any = F'''\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
        run_command(self._launch_args + testargs )
        __a : Tuple = get_results(UpperCamelCase__ )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["""eval_f1"""] , 28 )
        self.assertGreaterEqual(result["""eval_exact"""] , 28 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """qa_no_trainer""" ) ) )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_swag_no_trainer (multiple choice).
        __a : Optional[int] = self.get_auto_remove_tmp_dir()
        __a : Dict = F'''\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '''.split()
        run_command(self._launch_args + testargs )
        __a : List[Any] = get_results(UpperCamelCase__ )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """swag_no_trainer""" ) ) )
    @slow
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_summarization_no_trainer; checks ROUGE floors.
        __a : List[Any] = self.get_auto_remove_tmp_dir()
        __a : Tuple = F'''\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
        run_command(self._launch_args + testargs )
        __a : Tuple = get_results(UpperCamelCase__ )
        self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
        self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
        self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
        self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """summarization_no_trainer""" ) ) )
    @slow
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_translation_no_trainer; checks a BLEU floor.
        __a : Any = self.get_auto_remove_tmp_dir()
        __a : Optional[int] = F'''\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
        run_command(self._launch_args + testargs )
        __a : Optional[int] = get_results(UpperCamelCase__ )
        self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """translation_no_trainer""" ) ) )
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_semantic_segmentation_no_trainer; also streams logs to stdout.
        __a : List[Any] = logging.StreamHandler(sys.stdout )
        logger.addHandler(UpperCamelCase__ )
        __a : Optional[int] = self.get_auto_remove_tmp_dir()
        __a : List[Any] = F'''\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '''.split()
        run_command(self._launch_args + testargs )
        __a : Dict = get_results(UpperCamelCase__ )
        self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # run_image_classification_no_trainer on a tiny cats-vs-dogs sample.
        __a : Optional[Any] = self.get_auto_remove_tmp_dir()
        __a : List[Any] = F'''\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '''.split()
        if is_cuda_and_apex_available():
            testargs.append("""--fp16""" )
        run_command(self._launch_args + testargs )
        __a : Dict = get_results(UpperCamelCase__ )
        # The base model scores a 25%
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """step_1""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , """image_classification_no_trainer""" ) ) )
| 711 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Affinely transformed distribution: samples follow ``loc + scale * base``.

    Fixes applied: the original ``__init__`` declared ``_lowercase`` four
    times (duplicate-argument SyntaxError) and never assigned ``self.loc`` /
    ``self.scale``, which both the transform and the properties below read.
    NOTE(review): the three properties still share the obfuscated name
    ``lowerCAmelCase__`` (originally mean/variance/stddev), so later
    definitions shadow earlier ones until the real names are restored.
    """
    def __init__(self , base_distribution , loc=None , scale=None , event_dim=0 ):
        # Default to the identity transform.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def lowerCAmelCase__(self ):
        '''Mean shifts by loc and scales linearly.'''
        return self.base_dist.mean * self.scale + self.loc
    @property
    def lowerCAmelCase__(self ):
        '''Variance scales quadratically with ``scale``.'''
        return self.base_dist.variance * self.scale**2
    @property
    def lowerCAmelCase__(self ):
        '''Standard deviation of the transformed distribution.'''
        return self.variance.sqrt()
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Project a feature vector to one unbounded tensor per distribution arg.

    Fixes applied: ``__init__`` declared ``_lowercase`` three times
    (duplicate-argument SyntaxError), ``nn.Linear`` ignored the per-argument
    ``dim``, and the attributes read below (``self.proj`` etc.) were never
    assigned.
    """
    def __init__(self , in_features , args_dim , domain_map , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        # One linear head per distribution argument, sized by that argument's dim.
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def lowerCAmelCase__(self , _lowercase ):
        '''Apply every head, then map the raw outputs into each arg's domain.'''
        params_unbounded = [proj(_lowercase ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Wrap a plain callable as an ``nn.Module``.

    Fixes applied: ``__init__`` read the undefined name ``function`` instead
    of its argument, and the call method declared ``_lowercase`` twice (the
    positional and the ``*`` parameter), a duplicate-argument SyntaxError.
    """
    def __init__(self , _lowercase ):
        super().__init__()
        self.function = _lowercase
    def lowerCAmelCase__(self , x , *args ):
        '''Invoke the wrapped callable with the given arguments.'''
        return self.function(x , *args )
class SCREAMING_SNAKE_CASE__ :
    """Base class describing how network outputs parameterize a distribution.

    NOTE(review): the three class attributes below all carry the obfuscated
    name ``_lowerCAmelCase`` (originally distinct fields such as the
    distribution class and the per-argument dims), so only the last
    assignment survives; the ``__a`` bindings in ``__init__`` likewise never
    define ``self.dim``/``self.args_dim`` read later, and all methods share
    one name. Real identifiers must be restored before this class can work.
    """
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    def __init__(self , _lowercase = 1 ):
        '''simple docstring'''
        # dim > 1 lifts the base distribution to an Independent over the last axis,
        # multiplying every per-argument dimension accordingly.
        __a : Optional[int] = dim
        __a : str = {k: dim * self.args_dim[k] for k in self.args_dim}
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        # Build the base distribution from the unpacked (already-mapped) args.
        if self.dim == 1:
            return self.distribution_class(*_lowercase )
        else:
            return Independent(self.distribution_class(*_lowercase ) , 1 )
    def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None , ):
        '''simple docstring'''
        # Optionally affine-transform the base distribution by loc/scale.
        # NOTE(review): duplicate ``_lowercase`` parameters above are a
        # SyntaxError in real Python.
        __a : Tuple = self._base_distribution(_lowercase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Event shape: scalar () when dim == 1, else (dim,).
        return () if self.dim == 1 else (self.dim,)
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Number of event dimensions.
        return len(self.event_shape )
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Lower bound of the support used when clipping samples.
        return 0.0
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        # Projection layer mapping hidden features to raw distribution args.
        return ParameterProjection(
            in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def lowerCAmelCase__(self , *_lowercase ):
        '''simple docstring'''
        # Subclasses constrain raw projections into each argument's domain.
        raise NotImplementedError()
    @staticmethod
    def lowerCAmelCase__(_lowercase ):
        '''simple docstring'''
        # Smooth map from R to R+ ("square-plus"); keeps gradients near zero input.
        return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Student-T output head: maps raw projections to (df, loc, scale).

    Fix applied: the classmethod declared ``_lowercase`` three times
    (duplicate-argument SyntaxError) and read undefined names; parameters are
    restored from the argument order declared in the dict below.
    NOTE(review): both class attributes share one obfuscated name, so the
    second assignment shadows the first (originally args_dim /
    distribution_class).
    """
    _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
    _lowerCAmelCase = StudentT
    @classmethod
    def lowerCAmelCase__(cls , df , loc , scale ):
        '''Constrain raw outputs: scale > 0 (eps floor), df > 2; drop trailing axis.'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Normal output head: maps raw projections to (loc, scale).

    Fix applied: the classmethod declared ``_lowercase`` twice
    (duplicate-argument SyntaxError) and read undefined names.
    NOTE(review): both class attributes share one obfuscated name, so the
    second assignment shadows the first.
    """
    _lowerCAmelCase = {"loc": 1, "scale": 1}
    _lowerCAmelCase = Normal
    @classmethod
    def lowerCAmelCase__(cls , loc , scale ):
        '''Constrain scale to be positive (eps floor); drop the trailing axis.'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Negative-binomial output head (count data).

    NOTE(review): the two class attributes and all three methods carry
    colliding obfuscated names; the classmethod also declares ``_lowercase``
    twice (SyntaxError) and the instance methods unpack the undefined name
    ``distr_args`` instead of their argument. The original identifiers
    (domain_map / _base_distribution / distribution) must be restored.
    """
    _lowerCAmelCase = {"total_count": 1, "logits": 1}
    _lowerCAmelCase = NegativeBinomial
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase ):
        '''simple docstring'''
        # total_count constrained positive via square-plus; logits unconstrained.
        __a : Union[str, Any] = cls.squareplus(_lowercase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        # Build NegativeBinomial (wrapped in Independent over the last dim
        # when dim > 1).
        __a , __a : Optional[Any] = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=_lowercase , logits=_lowercase )
        else:
            return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
    def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None ):
        '''simple docstring'''
        # Scaling is folded into the logits rather than an affine transform.
        __a , __a : List[Any] = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 63 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ ):
    """Fast GPT-NeoX-20B tokenizer (byte-level BPE, ``tokenizers`` backend).

    Fixes applied: ``__init__`` and the save method declared duplicate
    ``_lowercase`` parameters (SyntaxError), and several locals/attributes
    were bound to unused names while undefined names were read back.
    NOTE(review): the four class attributes still share one obfuscated name,
    so only the last assignment survives (originally vocab_files_names,
    the pretrained maps/sizes, and model_input_names).
    """
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        # Rebuild the backend pre-tokenizer when the serialized
        # add_prefix_space disagrees with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def lowerCAmelCase__(self , save_directory , filename_prefix = None ):
        '''Save the backend tokenizer model files; returns the written paths.'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def lowerCAmelCase__(self , conversation ):
        '''Flatten a Conversation into input ids, keeping only the most recent
        ``model_max_length`` tokens.'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 712 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast dummy-weight tests for KandinskyVaaPriorPipeline.

    NOTE(review): every property/method below shares the obfuscated name
    ``lowerCAmelCase__`` (later definitions shadow earlier ones) and the
    ``__a`` bindings never define the names (``model``, ``pipe``, ``image``,
    ``tokenizer`` ...) read afterwards; the original identifiers must be
    restored before these tests can execute.
    """
    _lowerCAmelCase = KandinskyVaaPriorPipeline
    _lowerCAmelCase = ["prompt"]
    _lowerCAmelCase = ["prompt", "negative_prompt"]
    _lowerCAmelCase = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase = False
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Embedding width shared by the dummy text/image encoders.
        return 32
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return 32
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return 100
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Tiny CLIP tokenizer fixture from the hub.
        __a : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Seeded dummy CLIP text encoder.
        torch.manual_seed(0 )
        __a : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_lowercase )
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Seeded dummy prior transformer.
        torch.manual_seed(0 )
        __a : Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        __a : Tuple = PriorTransformer(**_lowercase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __a : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Seeded dummy CLIP vision encoder.
        torch.manual_seed(0 )
        __a : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __a : Optional[Any] = CLIPVisionModelWithProjection(_lowercase )
        return model
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # CLIP image processor matching the vision encoder above.
        __a : Optional[Any] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_lowercase , do_normalize=_lowercase , do_resize=_lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Assemble all dummy components for the pipeline constructor.
        __a : Union[str, Any] = self.dummy_prior
        __a : int = self.dummy_image_encoder
        __a : Any = self.dummy_text_encoder
        __a : int = self.dummy_tokenizer
        __a : Optional[Any] = self.dummy_image_processor
        __a : List[Any] = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=10.0 , )
        __a : List[Any] = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components
    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        '''simple docstring'''
        # Deterministic call kwargs for the pipeline.
        # NOTE(review): the duplicate ``_lowercase`` parameters above are a
        # SyntaxError in real Python (originally device / seed).
        if str(_lowercase ).startswith("""mps""" ):
            __a : Dict = torch.manual_seed(_lowercase )
        else:
            __a : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Union[str, Any] = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Full dummy inference run; compares the last 10 embedding values to a
        # pre-recorded reference slice.
        __a : Union[str, Any] = """cpu"""
        __a : Union[str, Any] = self.get_dummy_components()
        __a : Dict = self.pipeline_class(**_lowercase )
        __a : Tuple = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
        __a : str = output.image_embeds
        __a : Any = pipe(
            **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
        __a : List[Any] = image[0, -10:]
        __a : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __a : Optional[Any] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Batched-vs-single inference consistency check (CPU only).
        __a : Any = torch_device == """cpu"""
        __a : Any = True
        __a : Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=_lowercase , relax_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
    @skip_mps
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Attention-slicing forward-pass equivalence check (CPU only).
        __a : Optional[int] = torch_device == """cpu"""
        __a : Union[str, Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
| 63 | 0 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
return number | (1 << position)
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
return number & ~(1 << position)
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
return number ^ (1 << position)
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
return ((number >> position) & 1) == 1
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
_lowerCAmelCase = LEDTokenizer
_lowerCAmelCase = LEDTokenizerFast
_lowerCAmelCase = True
def lowerCAmelCase__(self ):
    """Write a tiny byte-level BPE vocab/merges pair into the test temp dir.

    Fixes applied: locals were bound to unused ``__a`` names while undefined
    names (``_lowercase``, ``self.vocab_file`` ...) were read back; the
    bindings are restored so the fixture files are actually written.
    """
    super().setUp()
    vocab = [
        """l""",
        """o""",
        """w""",
        """e""",
        """r""",
        """s""",
        """t""",
        """i""",
        """d""",
        """n""",
        """\u0120""",
        """\u0120l""",
        """\u0120n""",
        """\u0120lo""",
        """\u0120low""",
        """er""",
        """\u0120lowest""",
        """\u0120newer""",
        """\u0120wider""",
        """<unk>""",
    ]
    vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
    merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
    self.special_tokens_map = {"""unk_token""": """<unk>"""}
    self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
    self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
    with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
        fp.write(json.dumps(vocab_tokens ) + """\n""" )
    with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
        fp.write("""\n""".join(merges ) )
def lowerCAmelCase__(self , **kwargs ):
    """Instantiate the slow tokenizer from the temp dir with test defaults."""
    # Fix: the body updated/forwarded ``kwargs`` while the catch-all
    # parameter was declared ``**_lowercase`` (NameError). Renaming the
    # keyword catch-all is invisible to callers.
    kwargs.update(self.special_tokens_map )
    return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def lowerCAmelCase__(self , **kwargs ):
    """Instantiate the fast (Rust) tokenizer from the temp dir with test defaults."""
    # Fix: same broken kwargs binding as the slow-tokenizer helper.
    kwargs.update(self.special_tokens_map )
    return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def lowerCAmelCase__(self , _lowercase ):
    """Return the canonical (input_text, output_text) pair for round-trip tests."""
    sample_text = "lower newer"
    return sample_text, sample_text
@cached_property
def lowerCAmelCase__(self ):
    """Slow LED tokenizer, loaded from the hub once per test class."""
    tokenizer = LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
    return tokenizer
@cached_property
def lowerCAmelCase__(self ):
    """Fast LED tokenizer, loaded from the hub once per test class."""
    tokenizer = LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
    return tokenizer
@require_torch
def lowerCAmelCase__(self ):
    '''simple docstring'''
    # Batch-encodes two sentences with both tokenizers and checks the ids,
    # the (2, 9) padded shapes, and the exact first row of token ids.
    # NOTE(review): ``_lowercase`` is undefined here (no such parameter) and
    # the ``__a`` bindings never define ``batch``/``result`` -- NameError as
    # written; the original local names need restoring.
    __a : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
    __a : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        __a : Optional[int] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
        self.assertIsInstance(_lowercase , _lowercase )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
        __a : Dict = batch.input_ids.tolist()[0]
        self.assertListEqual(_lowercase , _lowercase )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , _lowercase )
self.assertIn("""attention_mask""" , _lowercase )
self.assertNotIn("""labels""" , _lowercase )
self.assertNotIn("""decoder_attention_mask""" , _lowercase )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : Dict = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    @require_torch
    def lowerCAmelCase__(self ):
        '''An over-long input with truncation enabled must be cut to the model max length (5122 here).

        NOTE(review): ``padding=_lowercase`` / ``truncation=_lowercase`` and both
        ``assertIsInstance`` arguments are undefined names, and ``batch`` is read but
        assigned to ``__a`` — renaming artifacts. Presumably the flags were ``True`` and
        the isinstance check was ``(batch, BatchEncoding)``; confirm against the
        upstream LED tokenizer tests.
        '''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = ["""A long paragraph for summarization."""]
__a : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : int = tokenizer(_lowercase , return_tensors="""pt""" )
__a : Dict = tokenizer(text_target=_lowercase , return_tensors="""pt""" )
__a : List[str] = inputs["""input_ids"""]
__a : List[Any] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : Optional[Any] = ["""Summary of the text.""", """Another summary."""]
__a : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__a : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
__a : Tuple = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
__a : Union[str, Any] = tokenizer.pad(_lowercase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , _lowercase )
    def lowerCAmelCase__(self ):
        '''Intentionally empty — NOTE(review): presumably overrides and disables an inherited
        common tokenizer test that does not apply here; confirm against the base mixin.'''
        pass
    def lowerCAmelCase__(self ):
        '''Slow and fast tokenizers must agree on a sentence containing an embedded <mask> token.

        NOTE(review): the ``_lowercase`` arguments below are undefined names, and
        ``tokenizer_r``/``tokenizer_p``/``tokens_r``/``tokens_p`` are read but the
        corresponding results are assigned to ``__a`` — mechanical renaming artifacts.
        The intent (from the reads) is: load both tokenizers from ``pretrained_name``,
        encode the sentence with special tokens, then compare ids and token strings.
        '''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = """A, <mask> AllenNLP sentence."""
                __a : Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                __a : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                # Same token-type and (average) attention-mask content from both implementations.
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # 50264 is the <mask> id; both implementations must produce the same ids...
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                # ...and the same token strings (Ġ marks a leading space in byte-level BPE).
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for the Conditional DETR object-detection model.

    Holds the transformer encoder/decoder layout, the backbone selection, the
    Hungarian-matcher costs and the loss coefficients.

    Fixes applied to the scrambled source:
    - every ``__init__`` parameter was declared as ``_lowercase`` (duplicate
      parameter names — a SyntaxError); names are restored from the attribute
      assignments in the body;
    - ``isinstance(__A, __A)`` / ``from_dict(__A)`` / the ``super().__init__``
      arguments used undefined names and are restored;
    - the base class is restored from the ``PretrainedConfig`` import at the
      top of the file.
    NOTE(review): the three class attributes below were all assigned to the
    same scrambled name; their restored names follow the ``PretrainedConfig``
    conventions implied by their values — confirm against upstream.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        """Build the config; a timm backbone and an HF backbone config are mutually exclusive."""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain-dict backbone config into its concrete config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers  # kept in sync with the encoder depth
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        """Alias required by `attribute_map`: attention heads come from the encoder."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        """Alias required by `attribute_map`: hidden size is the transformer d_model."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, recursively serializing the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
    """ONNX export metadata for this model (inputs, tolerance, opset)."""

    # Minimum torch version required for a correct export.
    _lowerCAmelCase = version.parse("1.11" )

    @property
    def lowerCAmelCase__(self ):
        '''Graph inputs with their dynamic axes (batch and image dimensions).'''
        dynamic_inputs = [
            ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ("""pixel_mask""", {0: """batch"""}),
        ]
        return OrderedDict(dynamic_inputs )

    @property
    def lowerCAmelCase__(self ):
        '''Absolute tolerance used when validating the exported model against the original.'''
        return 1e-5

    @property
    def lowerCAmelCase__(self ):
        '''Default ONNX opset version for the export.'''
        return 12
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # Build the CLI for converting an original ControlNet checkpoint to diffusers format.
    # NOTE(review): the parser is assigned to `lowercase__` but used as `parser` below —
    # a mechanical renaming artifact; presumably both should be the same name.
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
lowercase__ = parser.parse_args()
lowercase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
    '''Unit tests for the OpenAI GPT tokenizers (slow and fast).

    NOTE(review): this class shows pervasive renaming artifacts — ``UpperCamelCase__``
    is undefined (presumably the imported ``TokenizerTesterMixin``), the four class
    attributes all share the name ``_lowerCAmelCase`` (only the last survives), and
    method bodies read the undefined names ``__A``/``_lowercase`` while results are
    assigned to ``__a``. Confirm against the upstream OpenAI GPT tokenizer tests
    before fixing.
    '''
    _lowerCAmelCase = OpenAIGPTTokenizer
    _lowerCAmelCase = OpenAIGPTTokenizerFast
    _lowerCAmelCase = True
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        '''Write a tiny BPE vocab and merges file into the temp dir for the tests below.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __a : List[str] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        # NOTE(review): `__A` is undefined — presumably the vocab list above.
        __a : Optional[Any] = dict(zip(__A , range(len(__A ) ) ) )
        __a : str = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
        __a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(__A ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(__A ) )
    def lowerCAmelCase__(self , _lowercase ):
        '''Return the (input, expected-output) text pair used by the common tokenizer tests.'''
        return "lower newer", "lower newer"
    def lowerCAmelCase__(self ):
        '''Tokenize "lower" with the tiny vocab and check tokens and ids round-trip.'''
        __a : int = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        __a : str = """lower"""
        __a : int = ["""low""", """er</w>"""]
        # NOTE(review): `__A` is undefined in the calls below — renaming artifacts.
        __a : List[Any] = tokenizer.tokenize(__A )
        self.assertListEqual(__A , __A )
        __a : Dict = tokens + ["""<unk>"""]
        __a : List[Any] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
    def lowerCAmelCase__(self , _lowercase=15 ):
        '''GPT has no pad token: every padding request must raise for the fast tokenizer.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Tuple = self.rust_tokenizer_class.from_pretrained(__A , **__A )
                # Simple input
                __a : Union[str, Any] = """This is a simple input"""
                __a : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
                __a : Any = ("""This is a simple input""", """This is a pair""")
                __a : List[str] = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="""max_length""" )
                # Simple input
                self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    __A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="""max_length""" , )
                # Pair input
                self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="""max_length""" )
                # Pair input
                self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    __A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="""max_length""" , )
    def lowerCAmelCase__(self ):
        '''Intentionally empty — NOTE(review): presumably disables an inherited common test.'''
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
    # Placeholder subclass: re-runs the inherited suite with ftfy/spacy installed.
    # NOTE(review): `UpperCamelCase__` is undefined here — presumably the test class above.
    pass
| 715 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__(DiffusionPipeline):
    """Minimal one-step pipeline used to exercise custom-pipeline loading in tests.

    Fixes applied to the scrambled source: both ``__init__`` parameters were declared
    as ``_lowercase`` (duplicate names — a SyntaxError) and ``__call__`` referenced the
    undefined name ``_lowercase``; names are restored from the ``register_modules``
    keywords and the data flow. The base class is restored from the
    ``DiffusionPipeline`` import directly above.
    """

    def __init__(self, unet, scheduler):
        """Register the two components (denoising UNet and noise scheduler)."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run one denoising step on random noise and return a deterministic tensor.

        Returns an all-ones tensor shaped like the scheduler output: the subtraction
        cancels the step result, so only ``ones_like`` survives.
        """
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        unet_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(unet_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output == 0 → result is exactly ones_like(...).
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 63 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
    '''Builds tiny FocalNet configs/inputs and runs shape checks for the tests below.

    NOTE(review): the ``__init__`` signature declares every parameter as ``_lowercase``
    (duplicate names — a SyntaxError) and the ``create_and_check_*`` methods pass the
    undefined name ``__SCREAMING_SNAKE_CASE`` — mechanical renaming artifacts. The
    intended parameter names can be read off the attribute assignments in ``__init__``.
    '''
    def __init__(self , _lowercase , _lowercase=13 , _lowercase=32 , _lowercase=2 , _lowercase=3 , _lowercase=16 , _lowercase=[32, 64, 128] , _lowercase=[1, 2, 1] , _lowercase=[2, 2, 4] , _lowercase=2 , _lowercase=2.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=True , _lowercase=0.02 , _lowercase=1e-5 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=10 , _lowercase=8 , _lowercase=["stage1", "stage2"] , _lowercase=[1, 2] , ):
        '''Store the hyper-parameters of the tiny test model.'''
        __a : Any = parent
        __a : Optional[Any] = batch_size
        __a : Tuple = image_size
        __a : Union[str, Any] = patch_size
        __a : Tuple = num_channels
        __a : Tuple = embed_dim
        __a : List[str] = hidden_sizes
        __a : int = depths
        __a : str = num_heads
        __a : Union[str, Any] = window_size
        __a : Tuple = mlp_ratio
        __a : List[Any] = qkv_bias
        __a : List[str] = hidden_dropout_prob
        __a : int = attention_probs_dropout_prob
        __a : Optional[int] = drop_path_rate
        __a : Optional[Any] = hidden_act
        __a : Any = use_absolute_embeddings
        __a : Dict = patch_norm
        __a : int = layer_norm_eps
        __a : Union[str, Any] = initializer_range
        __a : int = is_training
        __a : Optional[int] = scope
        __a : Any = use_labels
        __a : List[str] = type_sequence_label_size
        __a : List[Any] = encoder_stride
        __a : Union[str, Any] = out_features
        __a : Tuple = out_indices
    def lowerCAmelCase__(self ):
        '''Create random pixel values (and labels, if enabled) plus a config.'''
        __a : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : Any = None
        if self.use_labels:
            __a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : int = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        '''Build a FocalNetConfig from the stored hyper-parameters.'''
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Run the base model and check the last hidden state shape.'''
        __a : str = FocalNetModel(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        __a : str = model(__SCREAMING_SNAKE_CASE )
        __a : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        __a : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Run the backbone (with and without out_features) and check feature maps/channels.'''
        __a : List[Any] = FocalNetBackbone(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        __a : Optional[int] = model(__SCREAMING_SNAKE_CASE )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        __a : List[str] = None
        __a : Optional[int] = FocalNetBackbone(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        __a : List[str] = model(__SCREAMING_SNAKE_CASE )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Run masked-image modeling (RGB and greyscale) and check reconstruction shape.'''
        __a : Any = FocalNetForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        __a : List[Any] = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : Union[str, Any] = FocalNetForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        __a : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Run image classification (RGB and greyscale) and check logits shape.'''
        __a : str = self.type_sequence_label_size
        __a : int = FocalNetForImageClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        __a : Optional[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Dict = 1
        __a : Optional[int] = FocalNetForImageClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        '''Prepare (config, inputs_dict) for the common test mixin.'''
        __a : Optional[Any] = self.prepare_config_and_inputs()
        __a , __a , __a : Dict = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    '''Common-mixin test suite for the FocalNet model family.

    NOTE(review): the two ``__UpperCAmelCase`` bases are undefined — presumably the
    imported ``ModelTesterMixin`` and ``PipelineTesterMixin``. The class attributes all
    share the name ``_lowerCAmelCase`` (only the last survives) and methods read the
    undefined names ``__SCREAMING_SNAKE_CASE`` — renaming artifacts throughout.
    '''
    _lowerCAmelCase = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    _lowerCAmelCase = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        '''Create the model tester and a ConfigTester for FocalNetConfig.'''
        __a : Union[str, Any] = FocalNetModelTester(self )
        __a : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 , has_text_modality=__SCREAMING_SNAKE_CASE )
    def lowerCAmelCase__(self ):
        '''Run the standard config round-trip/serialization checks.'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowerCAmelCase__(self ):
        '''No extra common-property checks beyond the ConfigTester defaults.'''
        return
    def lowerCAmelCase__(self ):
        '''Shape-check the base model.'''
        __a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
    def lowerCAmelCase__(self ):
        '''Shape-check the backbone.'''
        __a : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE )
    def lowerCAmelCase__(self ):
        '''Shape-check masked image modeling.'''
        __a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
    def lowerCAmelCase__(self ):
        '''Shape-check image classification.'''
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def lowerCAmelCase__(self ):
        '''Skipped: FocalNet takes pixel values, not input embeddings.'''
        pass
    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def lowerCAmelCase__(self ):
        '''Skipped: feedforward chunking is not supported by FocalNet.'''
        pass
    def lowerCAmelCase__(self ):
        '''Every non-backbone model exposes input embeddings and no output embeddings.'''
        __a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __a : str = model_class(__SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __a : List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
    def lowerCAmelCase__(self ):
        '''forward() of every non-backbone model takes pixel_values first.'''
        __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __a : List[str] = model_class(__SCREAMING_SNAKE_CASE )
            __a : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Optional[Any] = [*signature.parameters.keys()]
            __a : Optional[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase ):
        '''Helper: check hidden_states and reshaped_hidden_states counts and shapes.'''
        __a : int = model_class(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        with torch.no_grad():
            __a : List[str] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
        __a : Union[str, Any] = outputs.hidden_states
        __a : List[str] = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
        # FocalNet has a different seq_length
        __a : Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __a : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        __a : Dict = outputs.reshaped_hidden_states
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
        __a , __a , __a , __a : Dict = reshaped_hidden_states[0].shape
        __a : List[Any] = (
            reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def lowerCAmelCase__(self ):
        '''Hidden states are returned both via kwargs and via config flag.'''
        __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __a : Dict = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            __a : Dict = True
            self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a : Any = True
            self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def lowerCAmelCase__(self ):
        '''Hidden states are correct for inputs padded up to a multiple of the patch size.'''
        __a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __a : Optional[int] = 3
        __a : Union[str, Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __a : List[Any] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __a : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __a : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            __a : Dict = True
            self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a : List[Any] = True
            self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
    @slow
    def lowerCAmelCase__(self ):
        '''Pretrained checkpoints load without error.'''
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : List[str] = FocalNetModel.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
    def lowerCAmelCase__(self ):
        '''With zero-init config, all non-embedding params initialize to 0 or 1.'''
        __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __a : List[Any] = _config_zero_init(__SCREAMING_SNAKE_CASE )
        for model_class in self.all_model_classes:
            __a : int = model_class(config=__SCREAMING_SNAKE_CASE )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    '''Integration test: run the pretrained focalnet-tiny classifier on a real image.

    NOTE(review): ``__SCREAMING_SNAKE_CASE`` below is undefined — a renaming artifact;
    from the data flow it stands in for ``torch_device`` (in ``.to(...)``) and the
    intermediate results (in the assertions).
    '''
    @cached_property
    def lowerCAmelCase__(self ):
        '''Image processor matching the checkpoint, if vision deps are available.'''
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
    @slow
    def lowerCAmelCase__(self ):
        '''Classify the COCO cats image and check logits against reference values (tabby cat = 281).'''
        __a : Any = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__SCREAMING_SNAKE_CASE )
        __a : Any = self.default_image_processor
        __a : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        __a : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            __a : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
        # verify the logits
        __a : int = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
        __a : int = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
    '''Backbone-specific test suite for FocalNetBackbone.

    NOTE(review): ``__UpperCAmelCase`` is undefined — presumably the imported
    ``BackboneTesterMixin`` — and the three ``_lowerCAmelCase`` attributes share one
    name (only the last survives); renaming artifacts.
    '''
    _lowerCAmelCase = (FocalNetBackbone,) if is_torch_available() else ()
    _lowerCAmelCase = FocalNetConfig
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        '''Create the shared FocalNet model tester used by the mixin.'''
        __a : Optional[Any] = FocalNetModelTester(self )
| 716 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for ViT-MSN models (encoder layout and patch/image geometry).

    Fixes applied to the scrambled source: every ``__init__`` parameter was declared
    as ``_lowercase`` (duplicate parameter names — a SyntaxError); names are restored
    from the attribute assignments in the body. The base class is restored from the
    ``PretrainedConfig`` import at the top of the file.
    NOTE(review): the scrambled source assigned "vit_msn" to ``_lowerCAmelCase``;
    ``model_type`` is the ``PretrainedConfig`` attribute that takes such a value —
    confirm against upstream.
    """

    # Registered model type (used by the AutoConfig machinery).
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        """Create the config; defaults correspond to the ViT-MSN base architecture."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.