import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2

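# Minimal usage sketch (not part of the test suite above): running the full
# AudioLDM checkpoint that the slow tests exercise. Hedged: the checkpoint name
# and call signature come from the tests above; the generation settings here
# are illustrative only.
#
#     import torch
#     from diffusers import AudioLDMPipeline
#
#     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#     pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
#     audio = pipe("A hammer hitting a wooden surface", num_inference_steps=25).audios[0]
#     # `audio` is a 1-D numpy waveform at pipe.vocoder.config.sampling_rate Hz
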
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


# Note: @patch decorators apply bottom-up, so `file` mocks builtins.open and
# `sock` mocks socket.socket.
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()

"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Tuple = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Tuple = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Tuple = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Tuple = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
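# The lazy-import pattern above registers submodule contents in
# `_import_structure` and only materializes them on first attribute access via
# `_LazyModule`. A rough sketch of the idea (simplified, for illustration only;
# the real `_LazyModule` lives in `transformers.utils`):
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each exported name to the submodule that defines it
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, name):
#             # import the defining submodule lazily, then fetch the attribute
#             module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#             return getattr(module, name)
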
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
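# Worked trace of the recursion above for nums = [3, -3, 0]:
# find_max(nums, 0, 2) splits at mid = 1 into find_max(nums, 0, 1) and
# find_max(nums, 2, 2). The left branch splits again into the leaves nums[0] = 3
# and nums[1] = -3, returning 3; the right leaf returns 0; the final answer is
# max(3, 0) = 3. Each level halves the range, so the recursion depth is
# O(log n) while every element is visited once, i.e. n - 1 comparisons overall.
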
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]

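# Example behavior of the helpers above (outputs derived by applying the
# regexes; shown for illustration):
#
#     camelcase_to_snakecase("HTMLParser")               # -> "html_parser"
#     camelcase_to_snakecase("SomeDataset")              # -> "some_dataset"
#     snakecase_to_camelcase("some_dataset")             # -> "SomeDataset"
#     filename_prefix_for_split("SomeDataset", "train")  # -> "some_dataset-train"
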
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into a single processor."""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)

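# Usage sketch for the processor above. Hedged: the checkpoint name and the
# "input_features"/"labels" keys follow the standard transformers Whisper API;
# the waveform here is a stand-in.
#
#     import numpy as np
#     from transformers import WhisperProcessor
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#     batch = processor(audio=waveform, sampling_rate=16000, text="hello world")
#     # batch contains "input_features" (log-mel) and "labels" (token ids)
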
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCamelCase_ ( UpperCamelCase__ ):
_A : Tuple = 42
_A : Dict = None
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=0.9_99 , lowerCAmelCase="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
UpperCAmelCase = []
for i in range(lowerCAmelCase ):
UpperCAmelCase = i / num_diffusion_timesteps
UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase ) / alpha_bar_fn(lowerCAmelCase ) , lowerCAmelCase ) )
return torch.tensor(lowerCAmelCase , dtype=torch.floataa )
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self , snake_case__ = 10_00 , snake_case__ = "fixed_small_log" , snake_case__ = True , snake_case__ = 1.0 , snake_case__ = "epsilon" , snake_case__ = "squaredcos_cap_v2" , ) -> Any:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
UpperCAmelCase = betas_for_alpha_bar(__snake_case )
UpperCAmelCase = 1.0 - self.betas
UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
UpperCAmelCase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCAmelCase = 1.0
# setable values
UpperCAmelCase = None
UpperCAmelCase = torch.from_numpy(np.arange(0 , __snake_case )[::-1].copy() )
UpperCAmelCase = variance_type
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None ) -> Any:
"""simple docstring"""
return sample
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = num_inference_steps
UpperCAmelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCAmelCase = (np.arange(0 , __snake_case ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCAmelCase = torch.from_numpy(__snake_case ).to(__snake_case )
def UpperCamelCase_ ( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None ) -> Optional[Any]:
"""simple docstring"""
if prev_timestep is None:
UpperCAmelCase = t - 1
UpperCAmelCase = self.alphas_cumprod[t]
UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase = 1 - alpha_prod_t
UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase = self.betas[t]
else:
UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCAmelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCAmelCase = torch.log(torch.clamp(__snake_case , min=1e-20 ) )
UpperCAmelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCAmelCase = variance.log()
UpperCAmelCase = beta.log()
UpperCAmelCase = (predicted_variance + 1) / 2
UpperCAmelCase = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__=None , snake_case__ = True , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCAmelCase = torch.split(__snake_case , sample.shape[1] , dim=1 )
else:
UpperCAmelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCAmelCase = t - 1
UpperCAmelCase = self.alphas_cumprod[t]
UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase = 1 - alpha_prod_t
UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase = self.betas[t]
UpperCAmelCase = self.alphas[t]
else:
UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCAmelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase = torch.clamp(
__snake_case , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCAmelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase = 0
if t > 0:
UpperCAmelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__snake_case , device=model_output.device )
UpperCAmelCase = self._get_variance(
__snake_case , predicted_variance=__snake_case , prev_timestep=__snake_case , )
if self.variance_type == "fixed_small_log":
UpperCAmelCase = variance
elif self.variance_type == "learned_range":
UpperCAmelCase = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
""" for the UnCLIPScheduler.""" )
UpperCAmelCase = variance * variance_noise
UpperCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__snake_case , pred_original_sample=__snake_case )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCAmelCase = timesteps.to(original_samples.device )
UpperCAmelCase = alphas_cumprod[timesteps] ** 0.5
UpperCAmelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase = sqrt_alpha_prod.unsqueeze(-1 )
UpperCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCAmelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
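# Sketch of how a scheduler like the one above is driven during sampling
# (illustrative only; `model` is a stand-in for a trained denoising network):
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)  # hypothetical denoiser call
#         sample = scheduler.step(model_output, t, sample).prev_sample
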
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

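# The slow test above encodes the two-stage Kandinsky flow: the prior pipeline
# maps text to CLIP image embeddings, which the inpaint pipeline then consumes
# alongside the init image and mask. A condensed sketch (checkpoints as in the
# test; everything else illustrative):
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     inpaint = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")
#     image_emb, zero_image_emb = prior("a hat", num_inference_steps=5, negative_prompt="").to_tuple()
#     result = inpaint(
#         "a hat", image=init_image, mask_image=mask,
#         image_embeds=image_emb, negative_image_embeds=zero_image_emb,
#     ).images[0]
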
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two data points."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """
    For each vector in value_array, find the nearest vector in dataset and its
    euclidean distance, returned as [vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two data points."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

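# Quick worked example for the helpers above: with dataset = [[0, 0], [1, 1],
# [2, 2]] and value_array = [[0, 1]], the distances are 1.0, 1.0 and sqrt(5);
# ties keep the earlier vector, so similarity_search returns [[[0, 0], 1.0]].
#
#     dataset = np.array([[0, 0], [1, 1], [2, 2]])
#     value_array = np.array([[0, 1]])
#     print(similarity_search(dataset, value_array))  # [[[0, 0], 1.0]]
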
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.
    Wiki: https://en.wikipedia.org/wiki/Average_absolute_deviation

    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

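# Worked example for the doctest above: for [4, 1, 3, 2] the mean is
# (4 + 1 + 3 + 2) / 4 = 2.5, the absolute deviations are 1.5, 1.5, 0.5, 0.5,
# and their mean is 4.0 / 4 = 1.0.
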
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """
    Probabilistic (Miller-Rabin style) primality check, useful for big numbers.
    If n is composite, the chance of a false positive is at most 1 / 4**prec.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))

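# Worked example of the decomposition used above: for n = 13, n - 1 = 12 and
# 12 = 3 * 2**2, so d = 3 and exp = 2. A base a then passes the round if
# a**3 % 13 == 1 or if repeated squaring reaches 12 (== n - 1) within two
# steps, e.g. a = 5: 5**3 % 13 = 8, then 8**2 % 13 = 12, so 13 survives.
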
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any nested `GenerationConfig` by its dictionary form (for JSON
        serialization support).
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d

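# Usage sketch for the arguments class above (field names come from the
# dataclass; the output directory and values are illustrative):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="./out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )
#     # args.to_dict() now serializes any nested GenerationConfig to a dict.
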
'''simple docstring'''
__UpperCamelCase : Tuple = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
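# The try/except blocks above implement a soft-dependency pattern: each probe
# raises OptionalDependencyNotAvailable when a backend is missing, and the
# except branch star-imports dummy placeholder objects that raise a helpful
# ImportError on first use, while the else branch exports the real classes.
# A condensed sketch of one such guard (mirroring the blocks above):
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_pt_objects import *  # noqa F403  (placeholders)
#     else:
#         from .models import UNet2DConditionModel  # real implementation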
from ..utils import DummyObject, requires_backends


# NOTE: the original module defines one such placeholder class per
# sentencepiece-backed tokenizer; the concrete class names were lost in this
# dump, and the remaining definitions were copies of the one below (the name
# `SentencePieceDummyObject` is a stand-in, not transformers API).
class SentencePieceDummyObject(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
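# Behavior sketch: without the backend installed, instantiating a placeholder
# raises an ImportError that names the missing dependency, e.g.:
#
#     SentencePieceDummyObject()
#     # ImportError: ... requires the SentencePiece library but it was not found ...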
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
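# Example use of the helpers above (keys taken from the rename table):
#
#     rename_key(state_dict, "input_proj.weight", "input_projection.weight")
#     state_dict = rename_backbone_keys(state_dict)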
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a hub checkpoint's weights into our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # the destination keys were garbled in this dump; this reconstruction
                # assumes hub checkpoint keys begin with "conditional_detr"
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
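# Example invocation (script filename and output path are illustrative):
#
#     python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50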
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
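# Usage sketch for the API under test (URL illustrative):
#
#     from datasets.download.download_config import DownloadConfig
#     from datasets.utils.file_utils import cached_path
#
#     local_path = cached_path(
#         "https://example.com/data.txt.gz",
#         download_config=DownloadConfig(extract_compressed_file=True),
#     )
#     # `local_path` points at the downloaded (and here, extracted) file on disk.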
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = SCREAMING_SNAKE_CASE  # alias for the literal defined above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3"
        )
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    r"""
    Constructs an MCTCT processor which wraps an MCTCT feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
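# Usage sketch (checkpoint name illustrative; `waveform` is a 1-D float array):
#
#     from transformers import MCTCTProcessor
#
#     processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#     # Passing `text=...` in the same call additionally returns `labels`
#     # built from the tokenized transcription.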
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def SCREAMING_SNAKE_CASE__ ( self ):
# safety check on max_len default value so we are sure the test works
a :Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a :Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a :List[str] = tempfile.mkdtemp()
a :Any = ''' He is very happy, UNwant\u00E9d,running'''
a :Dict = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
a :Optional[Any] = tokenizer.__class__.from_pretrained(_lowerCamelCase )
a :str = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
a :str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a :Any = tempfile.mkdtemp()
a :Optional[Any] = ''' He is very happy, UNwant\u00E9d,running'''
a :Optional[int] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a :str = chr(0Xe_0_0_7 )
additional_special_tokens.append(_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
a :Tuple = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
a :int = tokenizer.__class__.from_pretrained(_lowerCamelCase )
a :Dict = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertIn(_lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a :str = tokenizer.__class__.from_pretrained(_lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a , a :Tuple = self.get_clean_sequence(_lowerCamelCase )
# a special token for Canine can be defined as follows:
a :Tuple = 0Xe_0_0_5
a :Optional[int] = chr(_lowerCamelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
a :Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
a :List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCamelCase )
a :List[str] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
a :Tuple = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
a :Optional[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , input_encoded + special_token_id )
a :Any = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a :int = chr(0Xe_0_0_5 )
a :str = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
a :Optional[int] = tokenizer.tokenize(_lowerCamelCase )
a :Optional[Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCamelCase )
self.assertEqual(token_a[0] , _lowerCamelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
a :Optional[int] = 0Xe_0_0_6
a :List[str] = chr(_lowerCamelCase )
a :Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCamelCase )
tokenizer.from_pretrained(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
a :Optional[Any] = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
a :Tuple = json.load(_lowerCamelCase )
# a special token for Canine can be defined as follows:
a :int = 0Xe_0_0_6
a :Optional[Any] = chr(_lowerCamelCase )
a :Union[str, Any] = [new_token_a]
a :Optional[int] = [new_token_a]
with open(os.path.join(_lowerCamelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
with open(os.path.join(_lowerCamelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a :str = tokenizer_class.from_pretrained(_lowerCamelCase , extra_ids=0 )
self.assertIn(_lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
new_token_id = 0xE007
new_token_b = chr(new_token_id )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
new_added_tokens = [AddedToken(new_token_b , lstrip=True )]
tokenizer = tokenizer_class.from_pretrained(
tmp_dir , additional_special_tokens=new_added_tokens , extra_ids=0 )
self.assertIn(new_token_b , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_b] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_b] ) ) )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ):
tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
input = '''hello world'''
if self.space_between_special_tokens:
output = '''[CLS] hello world [SEP]'''
else:
output = input
encoded = tokenizer.encode(input , add_special_tokens=False )
decoded = tokenizer.decode(encoded , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(decoded , [output, output.lower()] )
def SCREAMING_SNAKE_CASE__ ( self ):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
attributes_list = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
token = '''a'''
token_id = ord(token )
for attr in attributes_list:
setattr(tokenizer , attr + '''_id''' , None )
self.assertEqual(getattr(tokenizer , attr ) , None )
self.assertEqual(getattr(tokenizer , attr + '''_id''' ) , None )
setattr(tokenizer , attr + '''_id''' , token_id )
self.assertEqual(getattr(tokenizer , attr ) , token )
self.assertEqual(getattr(tokenizer , attr + '''_id''' ) , token_id )
setattr(tokenizer , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(tokenizer , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(tokenizer , '''additional_special_tokens_ids''' ) , [] )
additional_special_token_id = 0xE006
additional_special_token = chr(additional_special_token_id )
setattr(tokenizer , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(tokenizer , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(tokenizer , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
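# Hedged usage sketch of the special-token pattern exercised by the tests above.
# "google/canine-s" is an assumption for illustration; any slow tokenizer that
# accepts private-use characters behaves the same way.
#
# from transformers import AddedToken, AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("google/canine-s")
# new_token = AddedToken(chr(0xE006), lstrip=True)
# tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
# assert str(new_token) in tokenizer.additional_special_tokens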
| 445
| 1
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
"""simple docstring"""
def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None, ) -> str:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.backbone_featmap_shape = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
num_patches = (self.image_size // 32) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
backbone_config = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 1_6, 3_2],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, )
def create_and_check_model( self : List[Any], config : Tuple, pixel_values : Optional[Any], labels : List[str] ) -> Tuple:
"""simple docstring"""
model = ViTHybridModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_image_classification( self : List[str], config : Union[str, Any], pixel_values : int, labels : List[Any] ) -> str:
"""simple docstring"""
config.num_labels = self.type_sequence_label_size
model = ViTHybridForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values, labels=labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.model_tester = ViTHybridModelTester(self )
self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37 )
def test_config( self : Dict ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def test_inputs_embeds( self : Dict ) -> Dict:
"""simple docstring"""
pass
def test_model_common_attributes( self : str ) -> Any:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear ) )
def test_forward_signature( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names )
def test_model( self : Any ) -> int:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_image_classification( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def test_initialization( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
# Skip the check for the backbone
backbone_params = []
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def test_model_from_pretrained( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViTHybridModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ) -> Optional[int]:
'''simple docstring'''
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest (unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_image_processor( self : List[Any] ) -> Dict:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head( self : str ) -> str:
"""simple docstring"""
model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, expected_shape )
expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
@slow
@require_accelerate
def test_accelerate_inference( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto" )
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt" )
outputs = model(**inputs )
logits = outputs.logits
# model predicts one of the 1000 ImageNet classes
predicted_class_idx = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat" )
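# Hedged standalone sketch of the integration tests above, outside unittest.
# The checkpoint name is the one used by the accelerate test; the image path
# is a placeholder.
#
# from PIL import Image
# from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor
#
# processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
# model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])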
| 157
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type , is_finetuned ) -> str:
'''simple docstring'''
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
attribute = "lm_head"
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ) -> int:
'''simple docstring'''
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
is_used = True
else:
for key, mapped_key in MAPPING.items():
mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split("." )[-2]
mapped_key = mapped_key.replace("*" , layer_index )
if "weight_g" in name:
weight_type = "weight_g"
elif "weight_v" in name:
weight_type = "weight_v"
elif "bias" in name:
weight_type = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = "weight"
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
continue
if not is_used:
unused_weights.append(name )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> Dict:
'''simple docstring'''
name = full_name.split("conv_layers." )[-1]
items = name.split("." )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ) -> List[str]:
'''simple docstring'''
if config_path is not None:
config = UniSpeechConfig.from_pretrained(config_path )
else:
config = UniSpeechConfig()
if is_finetuned:
if dict_path:
target_dict = Dictionary.load_from_json(dict_path )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.pad_token_id = target_dict.bos_index
config.eos_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols )
vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
if not os.path.isdir(pytorch_dump_folder_path ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
return
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
vocab_dict = target_dict.indices
# fairseq has the <pad> and <s> switched
vocab_dict["<pad>"] = 42
vocab_dict["<s>"] = 43
with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(vocab_dict , vocab_handle )
tokenizer = WavaVecaPhonemeCTCTokenizer(
vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
return_attention_mask = True if config.feat_extract_norm == "layer" else False
feature_extractor = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
processor.save_pretrained(pytorch_dump_folder_path )
hf_unispeech = UniSpeechForCTC(config )
else:
hf_unispeech = UniSpeechForPreTraining(config )
if is_finetuned:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
model = model[0].eval()
recursively_load_weights(model , hf_unispeech , is_finetuned )
hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
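# Hedged CLI sketch for the converter above. Every path is a placeholder and
# the script filename is an assumption; the flags are exactly the ones
# registered on the parser.
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path /path/to/dict.json \
#       --config_path /path/to/config.json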
| 157
| 1
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data ) -> Any:
"""simple docstring"""
return (data["data"], data["target"])
def xgboost( features , target , test_features ) -> np.ndarray:
"""simple docstring"""
xgb = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(features , target )
# Predict target for test data
predictions = xgb.predict(test_features )
predictions = predictions.reshape(len(predictions ) , 1 )
return predictions
def main() -> None:
"""simple docstring"""
# Load California house price dataset
housing = fetch_california_housing()
data , target = data_handling(housing )
x_train , x_test , y_train , y_test = train_test_split(
data , target , test_size=0.25 , random_state=1 )
predictions = xgboost(x_train , y_train , x_test )
# Error printing
print(F"Mean Absolute Error : {mean_absolute_error(y_test , predictions )}" )
print(F"Mean Square Error : {mean_squared_error(y_test , predictions )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
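# A hedged follow-on sketch: scoring the same regressor with k-fold
# cross-validation instead of a single split (cv=5 is an arbitrary choice,
# not something this script prescribes).
#
# from sklearn.model_selection import cross_val_score
#
# housing = fetch_california_housing()
# scores = cross_val_score(
#     XGBRegressor(verbosity=0, random_state=42),
#     housing["data"],
#     housing["target"],
#     scoring="neg_mean_absolute_error",
#     cv=5,
# )
# print(f"CV Mean Absolute Error : {-scores.mean():.3f}")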
| 591
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
a_ : str = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline( Pipeline ):
"""simple docstring"""
def __init__( self , *args , **kwargs ) -> int:
super().__init__(*args , **kwargs )
self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
def _sanitize_parameters( self , padding=None , truncation=None , top_k=None , **kwargs ) -> Union[str, Any]:
preprocess_params , postprocess_params = {}, {}
if padding is not None:
preprocess_params['''padding'''] = padding
if truncation is not None:
preprocess_params['''truncation'''] = truncation
if top_k is not None:
postprocess_params['''top_k'''] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , image , question = None , **kwargs ) -> Optional[int]:
if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
inputs = {'''image''': image, '''question''': question}
else:
inputs = image
results = super().__call__(inputs , **kwargs )
return results
def preprocess( self , inputs , padding=False , truncation=False ) -> Dict:
image = load_image(inputs['''image'''] )
model_inputs = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation )
image_features = self.image_processor(images=image , return_tensors=self.framework )
model_inputs.update(image_features )
return model_inputs
def _forward( self , model_inputs ) -> Union[str, Any]:
model_outputs = self.model(**model_inputs )
return model_outputs
def postprocess( self , model_outputs , top_k=5 ) -> Optional[Any]:
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.sigmoid()[0]
scores , ids = probs.topk(top_k )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 194
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
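# What the _LazyModule wiring above buys, sketched: importing the package is
# cheap, and the torch-backed symbols are materialized only on first access.
#
# import transformers.models.focalnet as focalnet  # fast: nothing heavy imported yet
# model_cls = focalnet.FocalNetForImageClassification  # the real module loads here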
| 719
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case_ ( unittest.TestCase ):
@property
def dummy_uncond_unet( self ):
torch.manual_seed(0 )
model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def test_inference( self ):
unet = self.dummy_uncond_unet
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" ).images
generator = torch.manual_seed(0 )
image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" , return_dict=False )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class snake_case_ ( unittest.TestCase ):
def test_cifar10( self ):
model_id = "google/ddpm-cifar10-32"
unet = UNetaDModel.from_pretrained(model_id )
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator , output_type="numpy" ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
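# Hedged standalone sketch of the slow test above. Same public checkpoint;
# 10 inference steps is an arbitrary choice to keep the example cheap.
# UNet2DModel is the canonical diffusers name for the class this file
# imports under a mangled alias.
#
# import torch
# from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
#
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
# image = pndm(generator=torch.manual_seed(0), num_inference_steps=10,
#              output_type="numpy").images[0]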
| 370
| 0
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
def __init__( self ,**kwargs ):
"""simple docstring"""
super().__init__(**kwargs )
requires_backends(self ,"vision" )
requires_backends(self ,"torch" )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
def _sanitize_parameters( self ,**kwargs ):
"""simple docstring"""
preprocess_kwargs = {}
postprocess_kwargs = {}
forward_params = {}
# preprocess args
if "points_per_batch" in kwargs:
preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
forward_params["mask_threshold"] = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self ,image ,*args ,num_workers=None ,batch_size=None ,**kwargs ):
"""simple docstring"""
return super().__call__(image ,*args ,num_workers=num_workers ,batch_size=batch_size ,**kwargs )
def preprocess( self ,image ,points_per_batch=64 ,crops_n_layers: int = 0 ,crop_overlap_ratio: float = 512 / 1500 ,points_per_crop: Optional[int] = 32 ,crop_n_points_downscale_factor: Optional[int] = 1 ,):
"""simple docstring"""
image = load_image(image )
target_size = self.image_processor.size["longest_edge"]
crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
image ,target_size ,crops_n_layers ,crop_overlap_ratio ,points_per_crop ,crop_n_points_downscale_factor )
model_inputs = self.image_processor(images=cropped_images ,return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
inference_context = self.get_inference_context()
with inference_context():
model_inputs = self._ensure_tensor_on_device(model_inputs ,device=self.device )
image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
model_inputs["image_embeddings"] = image_embeddings
n_points = grid_points.shape[1]
points_per_batch = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 ,n_points ,points_per_batch ):
batched_points = grid_points[:, i : i + points_per_batch, :, :]
labels = input_labels[:, i : i + points_per_batch]
is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _forward( self ,model_inputs ,pred_iou_thresh=0.88 ,stability_score_thresh=0.95 ,mask_threshold=0 ,stability_score_offset=1 ,):
"""simple docstring"""
input_boxes = model_inputs.pop("input_boxes" )
is_last = model_inputs.pop("is_last" )
original_sizes = model_inputs.pop("original_sizes" ).tolist()
reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes" ).tolist()
model_outputs = self.model(**model_inputs )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
low_resolution_masks = model_outputs["pred_masks"]
masks = self.image_processor.post_process_masks(
low_resolution_masks ,original_sizes ,reshaped_input_sizes ,mask_threshold ,binarize=False )
iou_scores = model_outputs["iou_scores"]
masks , iou_scores , boxes = self.image_processor.filter_masks(
masks[0] ,iou_scores[0] ,original_sizes[0] ,input_boxes[0] ,pred_iou_thresh ,stability_score_thresh ,mask_threshold ,stability_score_offset ,)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def postprocess( self ,model_outputs ,output_rle_mask=False ,output_bboxes_mask=False ,crops_nms_thresh=0.7 ,):
"""simple docstring"""
all_scores = []
all_masks = []
all_boxes = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
all_scores = torch.cat(all_scores )
all_boxes = torch.cat(all_boxes )
output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
all_masks ,all_scores ,all_boxes ,crops_nms_thresh )
extra = defaultdict(list )
for output in model_outputs:
for k, v in output.items():
extra[k].append(v )
optional = {}
if output_rle_mask:
optional["rle_mask"] = rle_mask
if output_bboxes_mask:
optional["bounding_boxes"] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 524
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
input_ids = tokenizer("Hello there" ,return_tensors="np" ).input_ids
labels = tokenizer("Hi I am" ,return_tensors="np" ).input_ids
decoder_input_ids = shift_tokens_right(labels ,model.config.pad_token_id ,model.config.decoder_start_token_id )
logits = model(input_ids ,decoder_input_ids=decoder_input_ids ).logits
loss = optax.softmax_cross_entropy(logits ,onehot(labels ,logits.shape[-1] ) ).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
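# The quantity checked above, restated compactly (shapes: logits
# [batch, len, vocab], labels [batch, len]); this mirrors the lines in the
# test rather than adding new behavior:
#
# loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
# score = -(labels.shape[-1] * loss.item())  # per-sequence log-likelihood scale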
| 524
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 717
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :List[str] ):
snake_case_ : Union[str, Any] = """hf-internal-testing/tiny-random-t5"""
snake_case_ : str = AutoTokenizer.from_pretrained(_UpperCamelCase )
snake_case_ : str = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
snake_case_ : List[str] = tokenizer("""This is me""" ,return_tensors="""pt""" )
snake_case_ : List[str] = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
snake_case_ : List[str] = model.generate(**_UpperCamelCase )
snake_case_ : Dict = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase )
snake_case_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
snake_case_ : Optional[Any] = model_reloaded.generate(**_UpperCamelCase )
self.assertTrue(torch.allclose(_UpperCamelCase ,_UpperCamelCase ) )
def a__ ( self :Union[str, Any] ):
snake_case_ : Optional[Any] = """hf-internal-testing/tiny-random-t5"""
snake_case_ : str = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
snake_case_ : List[Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_UpperCamelCase ):
model.save_pretrained(_UpperCamelCase )
snake_case_ : Any = model.reverse_bettertransformer()
model.save_pretrained(_UpperCamelCase )
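# Hedged usage sketch of the round trip exercised above (requires the
# optimum package; the tiny test checkpoint is reused for cheapness, and
# AutoModelForSeq2SeqLM is the canonical name of the mangled import above).
#
# from transformers import AutoModelForSeq2SeqLM
#
# model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
# model = model.to_bettertransformer()       # swap in fused attention modules
# model = model.reverse_bettertransformer()  # restore canonical layout before saving
# model.save_pretrained("./t5-roundtrip")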
| 267
| 0
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ) -> int:
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
torch_layer.weight = nn.Parameter(weight )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ) -> List[str]:
'''simple docstring'''
np_query_key = np.asarray(weights[0] )
np_value = np.asarray(weights[1] )
np_dense = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ) -> Tuple:
'''simple docstring'''
np_query = np.asarray(weights[0] )
np_key = np.asarray(weights[1] )
np_value = np.asarray(weights[2] )
np_dense = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ) -> Union[str, Any]:
'''simple docstring'''
# layernorm 1
layer_norm_a = weights[0][0][0]
layer_norm_a_weight = np.asarray(layer_norm_a[0] )
layer_norm_a_bias = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
# lsh weights + output
attn_weights = weights[0][1]
if len(attn_weights ) < 4:
set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
else:
set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
# intermediate weighs
intermediate_weights = weights[2][0][1][2]
# Chunked Feed Forward
if len(intermediate_weights ) == 4:
intermediate_weights = intermediate_weights[2]
# layernorm 2
layer_norm_a_weight = np.asarray(intermediate_weights[0][0] )
layer_norm_a_bias = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
# intermediate dense
inter_dense_weight = np.asarray(intermediate_weights[1][0] )
inter_dense_bias = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
# intermediate out
out_dense_weight = np.asarray(intermediate_weights[4][0] )
out_dense_bias = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ) -> Dict:
'''simple docstring'''
torch_model_reformer = torch_model.reformer
# word embeds
word_embeddings = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
if isinstance(weights[3] , tuple ):
position_embeddings = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
emb_weights = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
trax_layer_weights = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
trax_layer_weights ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(block_weights , layer , hidden_size )
# output layer norm
layer_norm_out_weight = np.asarray(weights[7][0] )
layer_norm_out_bias = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
# output embeddings
output_embed_weights = np.asarray(weights[9][0] )
output_embed_bias = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ) -> List[str]:
'''simple docstring'''
config = ReformerConfig.from_json_file(config_file )
print(f"""Building PyTorch model from configuration: {config}""" )
model = ReformerModelWithLMHead(config )
with open(trax_model_pkl_path , """rb""" ) as f:
model_weights = pickle.load(f )["""weights"""]
set_model_weights_in_torch(model_weights , model , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
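# Hedged CLI sketch (paths are placeholders and the script filename is an
# assumption; the flags match the parser above):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./reformer_pytorch_model.bin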
| 109
|
"""simple docstring"""
UpperCamelCase__ :Tuple = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
UpperCamelCase__ :List[str] = frozenset(["""prompt""", """negative_prompt"""])
UpperCamelCase__ :Tuple = frozenset([])
UpperCamelCase__ :Union[str, Any] = frozenset(["""image"""])
UpperCamelCase__ :str = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
UpperCamelCase__ :Any = frozenset(["""image"""])
UpperCamelCase__ :Optional[int] = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
UpperCamelCase__ :int = frozenset(["""prompt""", """image""", """negative_prompt"""])
UpperCamelCase__ :Tuple = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
UpperCamelCase__ :Any = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
UpperCamelCase__ :Dict = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
UpperCamelCase__ :Dict = frozenset(["""image""", """mask_image"""])
UpperCamelCase__ :Tuple = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
UpperCamelCase__ :List[str] = frozenset(["""example_image""", """image""", """mask_image"""])
UpperCamelCase__ :List[str] = frozenset(["""class_labels"""])
UpperCamelCase__ :Union[str, Any] = frozenset(["""class_labels"""])
UpperCamelCase__ :Any = frozenset(["""batch_size"""])
UpperCamelCase__ :Optional[Any] = frozenset([])
UpperCamelCase__ :Optional[Any] = frozenset(["""batch_size"""])
UpperCamelCase__ :Optional[int] = frozenset([])
UpperCamelCase__ :str = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
UpperCamelCase__ :Union[str, Any] = frozenset(["""prompt""", """negative_prompt"""])
UpperCamelCase__ :Tuple = frozenset(["""input_tokens"""])
UpperCamelCase__ :int = frozenset(["""input_tokens"""])
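# How frozensets like these are consumed, sketched under the usual diffusers
# test conventions (the class name, pipeline, and frozenset names here are
# illustrative, not defined by this file):
#
# class MyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#     pipeline_class = MyPipeline
#     params = TEXT_TO_IMAGE_PARAMS              # call arguments the tester exercises
#     batch_params = TEXT_TO_IMAGE_BATCH_PARAMS  # arguments that must batch cleanly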
| 355
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = AltDiffusionPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ) -> Union[str, Any]:
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
tokenizer.model_max_length = 77
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ) -> Dict:
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_attention_slicing_forward_pass( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_alt_diffusion_ddim( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_pndm( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion( self ):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_fast_ddim( self ):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="numpy" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 39
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
    """Build a FocalNetConfig from the checkpoint name."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def rename_key( name ):
    """Map an original FocalNet state dict key to its HF equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        name = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert and verify a FocalNet checkpoint, optionally saving/pushing it."""
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: " , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
# verify conversion
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = BitImageProcessor(
do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , )
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
__lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" )
__lowerCAmelCase = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
__lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 )
__lowerCAmelCase = model(**lowerCamelCase )
__lowerCAmelCase = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(f'''{model_name}''' )
processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 39
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
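    # Hedged usage sketch added for illustration (assumes the reconstructed
    # signatures above; not part of the original module):
    print(logical_left_shift(0b1010, 3))       # '0b1010000'
    print(logical_right_shift(0b1010, 3))      # '0b1'
    print(arithmetic_right_shift(-0b1010, 3))  # '0b11110' (sign bit replicated)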
| 364
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params(attention_types ):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks
    to enable the export to ONNX, as the original uses Python control flow."""
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
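# Hedged usage sketch (assumed example, not part of the original file):
# custom_unfold mirrors torch.Tensor.unfold so the op stays ONNX-exportable.
#   x = torch.arange(6)
#   custom_unfold(x, dimension=0, size=2, step=2)
#   -> tensor([[0, 1], [2, 3], [4, 5]]), identical to x.unfold(0, 2, 2)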
class GPTNeoOnnxConfig ( OnnxConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 714
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
return parser
def test_command( args ):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main( ):
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
| 229
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ['ConvNextFeatureExtractor']
    _import_structure["image_processing_convnext"] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 76
|
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param( t5x_checkpoint_path ):
    '''simple docstring'''
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params( flax_dict ):
    '''simple docstring'''
    converted_dict = {}
    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = """.""".join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , new_key )
                new_key = new_key.replace("""encoder""" , """encoder.encoder""" )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    '''simple docstring'''
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = PixaStructForConditionalGeneration(config )
    converted_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(converted_params )
    tokenizer = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print("""Model saved in {}""".format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__A : Optional[Any] = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 499
| 0
|
'''simple docstring'''
from __future__ import annotations
def simple_interest( principal , daily_interest_rate , days_between_payments ) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def compound_interest( principal , nominal_annual_interest_rate_percentage , number_of_compounding_periods , ) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest( principal , nominal_annual_percentage_rate , number_of_years , ) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
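    # Hedged worked example (assumed figures, not from the original module):
    print(simple_interest(10_000, 0.0005, 10))  # 50.0 (0.05% per day for 10 days)
    print(compound_interest(10_000, 0.05, 3))   # 1576.25 = 10_000 * (1.05**3 - 1)
    print(apr_interest(10_000, 0.05, 3))        # ~1618, daily compounding of a 5% APR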
| 710
|
'''simple docstring'''
_lowerCAmelCase = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 245
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 316
| 0
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*args , **kwargs):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers)
class ServeModelInfoResult ( BaseModel ):
    '''Expose model information'''
    infos: Any
class ServeTokenizeResult ( BaseModel ):
    '''Tokenize result model'''
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult ( BaseModel ):
    '''DeTokenize result model'''
    text: str
class ServeForwardResult ( BaseModel ):
    '''Forward result model'''
    output: Any
class ServeCommand ( BaseTransformersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser ):
        serve_parser = parser.add_parser(
            'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
        serve_parser.add_argument(
            '--task' , type=str , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
        serve_parser.add_argument('--host' , type=str , default='localhost' , help='Interface the server will listen on.' )
        serve_parser.add_argument('--port' , type=int , default=8888 , help='Port the serving will listen to.' )
        serve_parser.add_argument('--workers' , type=int , default=1 , help='Number of http workers' )
        serve_parser.add_argument('--model' , type=str , help='Model\'s name or path to stored model.' )
        serve_parser.add_argument('--config' , type=str , help='Model\'s config name or path to stored model.' )
        serve_parser.add_argument('--tokenizer' , type=str , help='Tokenizer name to use.' )
        serve_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self , pipeline , host , port , workers ):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install \"transformers[serving]\".'
                'Or install FastAPI and uvicorn separately.' )
        else:
            logger.info(F'''Serving model over {host}:{port}''' )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['GET'] , ),
                    APIRoute(
                        '/tokenize' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/detokenize' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/forward' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['POST'] , ),
                ] , timeout=600 , )
    def run( self ):
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def model_info( self ):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize( self , text_input = Body(None , embed=True ) , return_ids = Body(False , embed=True ) ):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'model': '', 'error': str(e )} )
    def detokenize( self , tokens_ids = Body(None , embed=True ) , skip_special_tokens = Body(False , embed=True ) , cleanup_tokenization_spaces = Body(True , embed=True ) , ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='' , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'model': '', 'error': str(e )} )
    async def forward( self , inputs=Body(None , embed=True ) ):
        # Check we don't have empty string
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            model_output = self._pipeline(inputs )
            return ServeForwardResult(output=model_output )
        except Exception as e:
            raise HTTPException(500 , {'error': str(e )} )
| 701
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = KandinskyImgaImgPipeline
lowerCAmelCase = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
lowerCAmelCase = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowerCAmelCase = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase = False
@property
def _UpperCamelCase ( self ) -> Optional[int]:
return 32
@property
def _UpperCamelCase ( self ) -> Any:
return 32
@property
def _UpperCamelCase ( self ) -> Dict:
return self.time_input_dim
@property
def _UpperCamelCase ( self ) -> Any:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
return 1_00
@property
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
snake_case_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
snake_case_ = MultilingualCLIP(a )
snake_case_ = text_encoder.eval()
return text_encoder
@property
def _UpperCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case_ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case_ = UNetaDConditionModel(**a )
return model
@property
def _UpperCamelCase ( self ) -> str:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
snake_case_ = DDIMScheduler(**a )
snake_case_ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _UpperCamelCase ( self , a , a=0 ) -> str:
snake_case_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
snake_case_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )
# create init_image
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(a ) ).to(a )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((2_56, 2_56) )
if str(a ).startswith('mps' ):
snake_case_ = torch.manual_seed(a )
else:
snake_case_ = torch.Generator(device=a ).manual_seed(a )
snake_case_ = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ) -> int:
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**a )
snake_case_ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
snake_case_ = pipe(**self.get_dummy_inputs(a ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case_ = 'A red cartoon frog, 4k'
snake_case_ = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a )
snake_case_ = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
snake_case_ = pipeline(
a , image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )
snake_case_ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(a , a )
| 607
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table ):
    """simple docstring"""
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
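    # Hedged usage sketch (assumed example, not part of the original module):
    # factor a small square matrix and check the reconstruction.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    print(np.allclose(lower @ upper, matrix))  # True when no pivoting is needed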
| 207
| 0
|
"""simple docstring"""
import os
def solution():
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , 'triangle.txt' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
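    # Hedged sketch of the same DP on an inline triangle (assumed example,
    # independent of triangle.txt):
    demo = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(demo)):
        for j in range(len(demo[i])):
            left = demo[i - 1][j - 1] if j > 0 else 0
            right = demo[i - 1][j] if j != len(demo[i - 1]) else 0
            demo[i][j] += max(left, right)
    print(max(demo[-1]))  # 23, via the path 3 -> 7 -> 4 -> 9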
| 24
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__a = '\\n Text data.\n Second line of data.'
__a = 'file'
@pytest.fixture(scope='''session''' )
def a ( snake_case__: Dict ):
'''simple docstring'''
lowercase_ = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
lowercase_ = bytes(snake_case__ , '''utf-8''' )
with zstd.open(snake_case__ , '''wb''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture
def a ( snake_case__: int ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , snake_case__ ) , '''w''' ) as f:
f.write(snake_case__ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def a ( snake_case__: List[Any] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
lowercase_ = input_paths[compression_format]
lowercase_ = tmp_path / '''cache'''
lowercase_ = DownloadConfig(cache_dir=snake_case__ , extract_compressed_file=snake_case__ )
lowercase_ = cached_path(snake_case__ , download_config=snake_case__ )
with open(snake_case__ ) as f:
lowercase_ = f.read()
with open(snake_case__ ) as f:
lowercase_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def a ( snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Dict ):
'''simple docstring'''
lowercase_ = '''custom_cache'''
lowercase_ = '''custom_extracted_dir'''
lowercase_ = tmp_path / '''custom_extracted_path'''
if default_extracted:
lowercase_ = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , snake_case__ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(snake_case__ ) )
lowercase_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowercase_ = xz_file
lowercase_ = (
DownloadConfig(extract_compressed_file=snake_case__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=snake_case__ )
)
lowercase_ = cached_path(snake_case__ , download_config=snake_case__ )
assert Path(snake_case__ ).parent.parts[-2:] == expected
def a ( snake_case__: Any ):
'''simple docstring'''
# absolute path
lowercase_ = str(Path(snake_case__ ).resolve() )
assert cached_path(snake_case__ ) == text_file
# relative path
lowercase_ = str(Path(snake_case__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(snake_case__ ) == text_file
def a ( snake_case__: Optional[Any] ):
'''simple docstring'''
# absolute path
lowercase_ = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(snake_case__ ):
cached_path(snake_case__ )
# relative path
lowercase_ = '''./__missing_file__.txt'''
with pytest.raises(snake_case__ ):
cached_path(snake_case__ )
def test_get_from_cache_fsspec ( tmpfs_file ):
    '''simple docstring'''
    output_file = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_cached_path_offline ( ):
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_http_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_ftp_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_fsspec_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
| 97
|
def odd_even_transposition( arr : list ) -> list:
    arr_size = len(arr )
    for pass_index in range(arr_size ):
        # even passes compare pairs (0,1),(2,3)...; odd passes compare (1,2),(3,4)...
        for i in range(pass_index % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
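    # Illustrative check (added example): odd-even transposition sort
    # alternates between even- and odd-indexed neighbour pairs and needs at
    # most len(arr) passes to sort the list.
    assert odd_even_transposition([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]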
| 243
| 0
|
from __future__ import annotations
import math
def prime_sieve( num: int ) -> list[int]:
    if num <= 0:
        message = F'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(message )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
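    # Illustrative check (added example): the sieve keeps exactly the primes.
    assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]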
| 700
|
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class _snake_case ( PretrainedConfig ):
    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}

    def __init__( self ,backbone_config: Optional[Dict] = None ,feature_size: int = 256 ,mask_feature_size: int = 256 ,hidden_dim: int = 256 ,encoder_feedforward_dim: int = 1_024 ,activation_function: str = "relu" ,encoder_layers: int = 6 ,decoder_layers: int = 10 ,num_attention_heads: int = 8 ,dropout: float = 0.0 ,dim_feedforward: int = 2_048 ,pre_norm: bool = False ,enforce_input_projection: bool = False ,common_stride: int = 4 ,ignore_value: int = 255 ,num_queries: int = 100 ,no_object_weight: float = 0.1 ,class_weight: float = 2.0 ,mask_weight: float = 5.0 ,dice_weight: float = 5.0 ,train_num_points: int = 12_544 ,oversample_ratio: float = 3.0 ,importance_sample_ratio: float = 0.75 ,init_std: float = 0.02 ,init_xavier_std: float = 1.0 ,use_auxiliary_loss: bool = True ,feature_strides: List[int] = [4, 8, 16, 32] ,output_auxiliary_logits: bool = None ,**kwargs ,):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 ,in_channels=3 ,patch_size=4 ,embed_dim=96 ,depths=[2, 2, 18, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=False ,out_features=["stage1", "stage2", "stage3", "stage4"] ,)
        if isinstance(backbone_config ,dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                F'''Supported model types: {",".join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config( cls ,backbone_config: PretrainedConfig ,**kwargs ):
        return cls(
            backbone_config=backbone_config ,**kwargs ,)

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 465
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 3_2

    @property
    def time_input_dim( self ):
        return 3_2

    @property
    def block_out_channels_a( self ):
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        return 1_0_0
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule="""linear""" ,beta_start=0.0_00_85 ,beta_end=0.0_12 ,clip_sample=False ,set_alpha_to_one=False ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=False ,)
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
        # create mask
        mask = np.ones((6_4, 6_4) ,dtype=np.floataa )
        mask[:3_2, :3_2] = 0
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 6_4,
            """width""": 6_4,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) ,return_dict=False ,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F'''image.shape {image.shape}''' )
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((7_6_8, 7_6_8) ,dtype=np.floataa )
        mask[:2_5_0, 2_5_0:-2_5_0] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" ,torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt ,generator=generator ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
        output = pipeline(
            image=init_image ,mask_image=mask ,image_embeds=image_emb ,negative_image_embeds=zero_image_emb ,generator=generator ,num_inference_steps=1_0_0 ,height=7_6_8 ,width=7_6_8 ,output_type="""np""" ,)
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image ,expected_image )
| 334
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 321
| 0
|
'''simple docstring'''
def partition( m : int ):
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
            print(partition(n))
except ValueError:
print('''Please pass a number.''')
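    # Worked example (added for illustration): partition(m) counts the integer
    # partitions of m; for m = 3 these are 3, 2+1 and 1+1+1.
    assert partition(3) == 3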
| 420
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force ( magnitude : float , angle : float , radian_mode : bool = False ):
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium ( forces : NDArray[floataa] , location : NDArray[floataa] , eps : float = 10**-1 ):
    # the sum of moments (cross products of position and force) must vanish
    moments = cross(location , forces )
    sum_moments = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 1_80 - 30),
            polar_force(879.54, 45),
            polar_force(1_00, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(2_15, 1_80 - 45),
            polar_force(2_64, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
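    # Illustrative addition: two equal and opposite forces applied at the same
    # point create no net moment, so the system is trivially in equilibrium.
    forces = array([polar_force(10.0, 90), polar_force(10.0, 270)])
    location = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)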
| 420
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 83
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class Node ( Generic[T]):
    def __init__( self , data: T ):
        """simple docstring"""
        self.data = data
        self.next: Node[T] | None = None

    def __str__( self ):
        """simple docstring"""
        return f'''{self.data}'''


class Stack ( Generic[T]):
    def __init__( self ):
        """simple docstring"""
        self.top: Node[T] | None = None

    def __iter__( self ):
        """simple docstring"""
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self ):
        """simple docstring"""
        return "->".join([str(item ) for item in self] )

    def __len__( self ):
        """simple docstring"""
        return len(tuple(iter(self ) ) )
    def is_empty( self ):
        """simple docstring"""
        return self.top is None

    def push( self , item: T ):
        """simple docstring"""
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self ):
        """simple docstring"""
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek( self ):
        """simple docstring"""
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data

    def clear( self ):
        """simple docstring"""
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
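    # Illustrative usage of the linked-list stack above (added example).
    stack = Stack[int]()
    stack.push(1)
    stack.push(2)
    assert str(stack) == "2->1"
    assert stack.pop() == 2
    assert stack.peek() == 1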
| 83
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
    def check_results_dict_not_empty( self , results ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain( self ):
        MODEL_ID = '''sgugger/tiny-distilbert-classification'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , torchscript=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_inference_fpaa( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fpaa=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
    def test_train_no_configs_fpaa( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fpaa=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs( self ):
        MODEL_ID = '''sshleifer/tinier_bart'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs( self ):
        MODEL_ID = '''sshleifer/tinier_bart'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(tmp_dir , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(tmp_dir , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(tmp_dir , '''train_time.csv''' ) , env_info_csv_file=os.path.join(tmp_dir , '''env.csv''' ) , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''env.csv''' ) ).exists() )
    def test_trace_memory( self ):
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , '''sequential''' ) )
            self.assertTrue(hasattr(summary , '''cumulative''' ) )
            self.assertTrue(hasattr(summary , '''current''' ) )
            self.assertTrue(hasattr(summary , '''total''' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , '''log.txt''' ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , '''log.txt''' ) ).exists() )
| 713
|
"""simple docstring"""
import functools
def mincost_tickets ( days : list[int] , costs : list[int] ):
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 366:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
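    # Worked example (added for illustration): travelling on days 1, 4 and 6
    # with costs [2, 7, 25] for 1-, 7- and 30-day tickets, three 1-day
    # tickets are cheapest: 3 * 2 == 6.
    assert mincost_tickets([1, 4, 6], [2, 7, 25]) == 6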
| 95
| 0
|
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
SCREAMING_SNAKE_CASE_ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
    """simple docstring"""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder ( folder_based_builder.FolderBasedBuilder ):
    """simple docstring"""
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = '''audio'''
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='''audio''' , label_column='''label''' )
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
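# Illustrative usage (added; the data_dir path is hypothetical and must follow
# the documented "audiofolder" layout with one sub-directory per label):
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="/path/to/folder")
#
# AUDIO_EXTENSIONS above controls which files the builder picks up.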
| 34
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( PretrainedConfig ):
    model_type = """xglm"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , vocab_size=2_5_6_0_0_8 , max_position_embeddings=2_0_4_8 , d_model=1_0_2_4 , ffn_dim=4_0_9_6 , num_layers=2_4 , attention_heads=1_6 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 346
| 0
|
import re
def dna_complement( dna: str ):
    """simple docstring"""
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
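    # Illustrative check (added example): each base maps to its complement.
    assert dna_complement('''ATCG''' ) == '''TAGC'''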
| 333
|
from numpy import exp, pi, sqrt
def gaussian( x , mu: float = 0.0 , sigma: float = 1.0 ):
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
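    # Illustrative check (added example): at x == mu the density equals
    # 1 / sqrt(2 * pi * sigma**2), roughly 0.3989 for the standard normal.
    assert abs(gaussian(0 ) - 1 / sqrt(2 * pi )) < 1E-12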
| 333
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary'
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(6_4, 6_4)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = 'Normal'
    if result[0][0] == 1:
        prediction = 'Abnormality detected'
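    # Illustrative addition: report the predicted label for the test image.
    print(prediction)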
| 408
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
snake_case__ : int = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
snake_case__ : Optional[int] = {
'jukebox': 5_1_2,
}
class _a ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=512 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ):
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding="utf-8" ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding="utf-8" ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding="utf-8" ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(R"\-'" , R"\-+'" )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size( self ):
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )

    def get_vocab( self ):
        return dict(self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder )

    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ):
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize( self , lyrics ):
        return list(lyrics )

    def tokenize( self , artist , genre , lyrics , **kwargs ):
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists , genres , lyrics , is_split_into_words = False ):
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + ".v2"
                genres[idx] = [
                    self._normalize(genre ) + ".v2" for genre in genres[idx].split("_" )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace("\\" , "\n" )
        lyrics = self.out_of_vocab.sub("" , lyrics ), [], []
        return artists, genres, lyrics
    def _run_strip_accents( self , text ):
        text = unicodedata.normalize("NFD" , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )

    def _normalize( self , text ):
        accepted = (
            [chr(i ) for i in range(ord("a" ) , ord("z" ) + 1 )]
            + [chr(i ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
            + [chr(i ) for i in range(ord("0" ) , ord("9" ) + 1 )]
            + ["."]
        )
        accepted = frozenset(accepted )
        pattern = re.compile(R"_+" )
        text = "".join([c if c in accepted else "_" for c in text.lower()] )
        text = pattern.sub("_" , text ).strip("_" )
        return text

    def convert_lyric_tokens_to_string( self , lyrics ):
        return " ".join(lyrics )
    def convert_to_tensors( self , inputs , tensor_type = None , prepend_batch_axis = False ):
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
        return inputs
    def __call__( self , artist , genres , lyrics="" , return_tensors="pt" ):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
        with open(artists_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
        with open(genres_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
        with open(lyrics_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token( self , artists_index , genres_index , lyric_index ):
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
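# Illustrative usage (added; the artist, genre and file names are hypothetical
# and the three vocab JSON files must exist on disk):
#
#     tokenizer = _a(artists_file="artists.json", genres_file="genres.json", lyrics_file="lyrics.json")
#     encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")
#     encoding["input_ids"]  # one tensor per model level (v3/v2/v2)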
| 408
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self , device , seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2)
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_consistent(self):
'''simple docstring'''
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_single_identical(self):
'''simple docstring'''
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def test_num_images_per_prompt(self):
'''simple docstring'''
pass
def test_progress_bar(self):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
def test_full_model(self):
'''simple docstring'''
expected_video = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
prompt = "Spiderman is surfing"
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type="pt").frames
video = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
def test_two_step_model(self):
'''simple docstring'''
expected_video = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe = pipe.to("cuda")
prompt = "Spiderman is surfing"
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type="pt").frames
video = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
| 707
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head):
"""simple docstring"""
data_dir = Path("data_bin" )
xmod = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval()  # disable dropout
print(xmod )
xmod_sent_encoder = xmod.model.encoder.sentence_encoder
config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , config )
model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
layer = model.roberta.encoder.layer[i]
xmod_layer = xmod_sent_encoder.layers[i]
# self attention
self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
# self-attention output
self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
intermediate = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
intermediate.dense.weight = xmod_layer.fc1.weight
intermediate.dense.bias = xmod_layer.fc1.bias
# output
bert_output = layer.output
if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
bert_output.dense.weight = xmod_layer.fc2.weight
bert_output.dense.bias = xmod_layer.fc2.bias
bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
to_adapter = bert_output.adapter_modules[lang_code]
from_adapter = xmod_layer.adapter_modules[lang_code]
to_adapter.dense1.weight = from_adapter.fc1.weight
to_adapter.dense1.bias = from_adapter.fc1.bias
to_adapter.dense2.weight = from_adapter.fc2.weight
to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
model.roberta.set_default_language(SAMPLE_LANGUAGE )
our_output = model(input_ids )[0]
if classification_head:
their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids ) )
else:
their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
success = torch.allclose(our_output , their_output , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
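# Example invocation of the conversion script above (a sketch only: the script
# filename and all paths below are illustrative placeholders, not real files):
# python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path /path/to/xmod/model.pt \
#     --pytorch_dump_folder_path /path/to/output_dir \
#     --classification_head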
| 437
| 0
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
'''simple docstring'''
triplets: typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(base , max_perimeter + 1 ):
hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(hypotenuse ):
perimeter = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
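# A quick sanity check of the counting logic above (values worked out by hand):
# pythagorean_triple(12) finds exactly one right triangle with perimeter <= 12,
# the 3-4-5 triangle, so pythagorean_triple(12)[12] == 1. Starting `perpendicular`
# at `base` avoids counting the mirrored 4-3-5 triangle twice.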
def solution(max_perimeter: int = 1000) -> int:
'''simple docstring'''
triplets = pythagorean_triple(max_perimeter )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
| 593
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
"""simple docstring"""
model_type = "detr"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.get("""model_type""" )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
# set timm attributes to None
dilation, backbone, use_pretrained_backbone = None, None, None
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_channels = num_channels
self.num_queries = num_queries
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.dilation = dilation
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
@classmethod
def from_backbone_config(cls , backbone_config: PretrainedConfig , **kwargs ):
return cls(backbone_config=backbone_config , **kwargs )
def to_dict(self) -> Dict[str, any]:
output = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
output["backbone_config"] = self.backbone_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
class DetrOnnxConfig(OnnxConfig):
"""simple docstring"""
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def atol_for_validation(self) -> float:
return 1e-5
@property
def default_onnx_opset(self) -> int:
return 12
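# Minimal usage sketch for DetrConfig above (the overridden values are hypothetical):
# config = DetrConfig(num_queries=50, d_model=128)
# config.hidden_size           # -> 128, resolved through attribute_map
# config.num_attention_heads   # -> 8, the encoder_attention_heads default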
| 593
| 1
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''MobileNetV1Config'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''google/mobilenet_v1_1.0_224'''
_EXPECTED_OUTPUT_SHAPE = [1, 10_24, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''google/mobilenet_v1_1.0_224'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
tf_to_pt_map = {}
if isinstance(model, MobileNetV1ForImageClassification ):
backbone = model.mobilenet_v1
else:
backbone = model
prefix = "MobilenetV1/Conv2d_0/"
tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
tf_index = i + 1
pt_index = i * 2
pointer = backbone.layer[pt_index]
prefix = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
pointer = backbone.layer[pt_index + 1]
prefix = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
if isinstance(model, MobileNetV1ForImageClassification ):
prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
tf_to_pt_map[prefix + "weights"] = model.classifier.weight
tf_to_pt_map[prefix + "biases"] = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
init_vars = tf.train.list_variables(tf_checkpoint_path )
tf_weights = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
array = tf.train.load_variable(tf_checkpoint_path, name )
tf_weights[name] = array
# Build TF to PyTorch weights loading map
tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
array = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
array = np.transpose(array, (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2:  # copying into linear layer
array = array.squeeze().transpose()
else:
array = np.transpose(array, (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
pointer.data = torch.from_numpy(array )
tf_weights.pop(name, None )
tf_weights.pop(name + "/RMSProp", None )
tf_weights.pop(name + "/RMSProp_1", None )
tf_weights.pop(name + "/ExponentialMovingAverage", None )
logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
in_height, in_width = features.shape[-2:]
stride_height, stride_width = conv_layer.stride
kernel_height, kernel_width = conv_layer.kernel_size
if in_height % stride_height == 0:
pad_along_height = max(kernel_height - stride_height, 0 )
else:
pad_along_height = max(kernel_height - (in_height % stride_height), 0 )
if in_width % stride_width == 0:
pad_along_width = max(kernel_width - stride_width, 0 )
else:
pad_along_width = max(kernel_width - (in_width % stride_width), 0 )
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
padding = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(features, padding, "constant", 0.0 )
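# Worked example of the TF "SAME" padding rule mirrored above (hypothetical sizes):
# a 7x7 input with stride 2 and kernel 3 gives pad_along_height = max(3 - (7 % 2), 0) = 2,
# split as (pad_top, pad_bottom) = (1, 1), so the convolution output is ceil(7 / 2) = 4 rows tall.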
class MobileNetV1ConvLayer(nn.Module):
def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
"""simple docstring"""
super().__init__()
self.config = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
self.convolution = nn.Conv2d(
in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode="zeros" , )
if use_normalization:
self.normalization = nn.BatchNorm2d(
num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
else:
self.normalization = None
if use_activation:
if isinstance(use_activation , str ):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act , str ):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward( self , features ):
"""simple docstring"""
if self.config.tf_padding:
features = apply_tf_padding(features , self.convolution )
features = self.convolution(features )
if self.normalization is not None:
features = self.normalization(features )
if self.activation is not None:
features = self.activation(features )
return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
config_class = MobileNetV1Config
load_tf_weights = load_tf_weights_in_mobilenet_v1
base_model_prefix = """mobilenet_v1"""
main_input_name = """pixel_values"""
supports_gradient_checkpointing = False
def _init_weights( self , module ):
"""simple docstring"""
if isinstance(module , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module , nn.BatchNorm2d ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , MOBILENET_V1_START_DOCSTRING , )
class MobileNetV1Model(MobileNetV1PreTrainedModel):
def __init__( self , config , add_pooling_layer = True ):
"""simple docstring"""
super().__init__(config )
self.config = config
depth = 32
out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.conv_stem = MobileNetV1ConvLayer(
config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
self.layer = nn.ModuleList()
for i in range(13 ):
in_channels = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetV1ConvLayer(
config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
self.layer.append(
MobileNetV1ConvLayer(
config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _prune_heads( self , heads_to_prune ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
"""simple docstring"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
hidden_states = self.conv_stem(pixel_values )
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
hidden_states = layer_module(hidden_states )
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
last_hidden_state = hidden_states
if self.pooler is not None:
pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
else:
pooled_output = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , MOBILENET_V1_START_DOCSTRING , )
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
def __init__( self , config ):
"""simple docstring"""
super().__init__(config )
self.num_labels = config.num_labels
self.mobilenet_v1 = MobileNetV1Model(config )
last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels
# Classifier head
self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
"""simple docstring"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilenet_v1(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(self.dropout(pooled_output ) )
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze() , labels.squeeze() )
else:
loss = loss_fct(logits , labels )
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits , labels )
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
| 295
|
"""simple docstring"""
def _print_dist(dist, v):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(v ):
for j in range(v ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ), end="\t" )
else:
print("INF", end="\t" )
print()
def floyd_warshall(graph, v):
dist = [[float("inf" ) for _ in range(v )] for _ in range(v )]
for i in range(v ):
for j in range(v ):
dist[i][j] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(v ):
# looping through rows of graph array
for i in range(v ):
# looping through columns of graph array
for j in range(v ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
dist[i][j] = dist[i][k] + dist[k][j]
_print_dist(dist, v )
return dist, v
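# Non-interactive usage sketch for the function above (a tiny hand-checked graph):
# INF = float("inf")
# example_graph = [
#     [0.0, 2.0, INF],
#     [1.0, 0.0, INF],
#     [INF, INF, 0.0],
# ]
# floyd_warshall(example_graph, 3)  # first row prints 0, 2, INF; dist[1][0] stays 1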
if __name__ == "__main__":
lowercase_ : Tuple = int(input('''Enter number of vertices: '''))
lowercase_ : List[Any] = int(input('''Enter number of edges: '''))
lowercase_ : Optional[Any] = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
lowercase_ : Tuple = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
lowercase_ : str = int(input('''Enter source:'''))
lowercase_ : Optional[Any] = int(input('''Enter destination:'''))
lowercase_ : Union[str, Any] = float(input('''Enter weight:'''))
lowercase_ : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 295
| 1
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
"""simple docstring"""
model_type = 'esm'
def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.emb_layer_norm_before = emb_layer_norm_before
self.token_dropout = token_dropout
self.is_folding_model = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
esmfold_config = EsmFoldConfig()
elif isinstance(esmfold_config , dict ):
esmfold_config = EsmFoldConfig(**esmfold_config )
self.esmfold_config = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
self.vocab_list = get_default_vocab_list()
else:
self.vocab_list = vocab_list
else:
self.esmfold_config = None
self.vocab_list = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , False ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def to_dict( self ):
output = super().to_dict()
if isinstance(self.esmfold_config , EsmFoldConfig ):
output['esmfold_config'] = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
"""simple docstring"""
esm_type: str = None
fp16_esm: bool = True
use_esm_attn_map: bool = False
esm_ablate_pairwise: bool = False
esm_ablate_sequence: bool = False
esm_input_dropout: float = 0
embed_aa: bool = True
bypass_lm: bool = False
lddt_head_hid_dim: int = 128
trunk: "TrunkConfig" = None
def __post_init__( self ):
if self.trunk is None:
self.trunk = TrunkConfig()
elif isinstance(self.trunk , dict ):
self.trunk = TrunkConfig(**self.trunk )
def to_dict( self ):
output = asdict(self )
output['trunk'] = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
"""simple docstring"""
num_blocks: int = 48
sequence_state_dim: int = 1024
pairwise_state_dim: int = 128
sequence_head_width: int = 32
pairwise_head_width: int = 32
position_bins: int = 32
dropout: float = 0
layer_drop: float = 0
cpu_grad_checkpoint: bool = False
max_recycles: int = 4
chunk_size: Optional[int] = 128
structure_module: "StructureModuleConfig" = None
def __post_init__( self ):
if self.structure_module is None:
self.structure_module = StructureModuleConfig()
elif isinstance(self.structure_module , dict ):
self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
def to_dict( self ):
output = asdict(self )
output['structure_module'] = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
"""simple docstring"""
sequence_dim: int = 384
pairwise_dim: int = 128
ipa_dim: int = 16
resnet_dim: int = 128
num_heads_ipa: int = 12
num_qk_points: int = 4
num_v_points: int = 8
dropout_rate: float = 0.1
num_blocks: int = 8
num_transition_layers: int = 1
num_resnet_blocks: int = 2
num_angles: int = 7
trans_scale_factor: int = 10
epsilon: float = 1e-8
inf: float = 1e5
def to_dict( self ):
return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
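# Minimal usage sketch for EsmConfig above (the vocab_size value is a hypothetical example):
# config = EsmConfig(vocab_size=33)
# config.hidden_size  # -> 768, the default set in __init__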
| 13
|
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
factors = prime_factors(number )
if is_square_free(factors ):
return -1 if len(factors ) % 2 else 1
return 0
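# Quick examples for the Mobius function above (values worked out by hand):
# mobius(24) == 0 because 24 = 2**3 * 3 is not square-free,
# mobius(10) == 1 because 10 has an even number of prime factors (2 and 5),
# mobius(7) == -1 because 7 has a single prime factor.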
if __name__ == "__main__":
import doctest
doctest.testmod()
| 384
| 0
|
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
'''simple docstring'''
_backends = ["onnx"]
def __init__(self, *args, **kwargs):
requires_backends(self, ['onnx'])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ['onnx'])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ['onnx'])
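# Usage note (a sketch of the dummy-object pattern): because the real class is swapped
# for this placeholder when `onnx` is missing, importing the package still succeeds and
# failure is deferred until use, with an informative message from requires_backends:
# OnnxRuntimeModel()  # raises only here, asking the user to install `onnx`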
| 706
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin , ConfigMixin , ModuleUtilsMixin):
'''simple docstring'''
_keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , prefix_length: int , prefix_inner_dim: int , prefix_hidden_dim: Optional[int] = None , vocab_size: int = 50257 , n_positions: int = 1024 , n_embd: int = 768 , n_layer: int = 12 , n_head: int = 12 , n_inner: Optional[int] = None , activation_function: str = "gelu_new" , resid_pdrop: float = 0.1 , embd_pdrop: float = 0.1 , attn_pdrop: float = 0.1 , layer_norm_epsilon: float = 1e-5 , initializer_range: float = 0.02 , scale_attn_weights: bool = True , use_cache: bool = True , scale_attn_by_inverse_layer_idx: bool = False , reorder_and_upcast_attn: bool = False , ):
super().__init__()
self.prefix_length = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
self.prefix_inner_dim = prefix_inner_dim
self.prefix_hidden_dim = prefix_hidden_dim
self.encode_prefix = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
self.decode_prefix = (
nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
)
gpt_config = GPT2Config(
vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
self.transformer = GPT2LMHeadModel(gpt_config )
def forward( self , input_ids: torch.Tensor , prefix_embeds: torch.Tensor , attention_mask: Optional[torch.Tensor] = None , labels: Optional[torch.Tensor] = None , ):
embedding_text = self.transformer.transformer.wte(input_ids )
hidden = self.encode_prefix(prefix_embeds )
prefix_embeds = self.decode_prefix(hidden )
embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
labels = torch.cat((dummy_token, input_ids) , dim=1 )
out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def get_dummy_token( self , batch_size: int , device: torch.device ) -> torch.Tensor:
return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
def encode( self , prefix ):
return self.encode_prefix(prefix )
@torch.no_grad()
def generate_captions( self , features , eos_token_id , device ):
features = torch.split(features , 1 , dim=0 )
generated_tokens = []
generated_seq_lengths = []
for feature in features:
feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
# Only support beam search for now
output_tokens, seq_lengths = self.generate_beam(
input_embeds=feature , device=device , eos_token_id=eos_token_id )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
generated_tokens = torch.stack(generated_tokens )
generated_seq_lengths = torch.stack(generated_seq_lengths )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size: int = 5 , entry_length: int = 67 , temperature: float = 1.0 , eos_token_id: Optional[int] = None , ):
stop_token_index = eos_token_id
tokens = None
scores = None
seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
if input_embeds is not None:
generated = input_embeds
else:
generated = self.transformer.transformer.wte(input_ids )
for i in range(entry_length ):
outputs = self.transformer(inputs_embeds=generated )
logits = outputs.logits
logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
logits = logits.softmax(-1 ).log()
if scores is None:
scores, next_tokens = logits.topk(beam_size , -1 )
generated = generated.expand(beam_size , *generated.shape[1:] )
next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
tokens = next_tokens
else:
tokens = tokens.expand(beam_size , *tokens.shape[1:] )
tokens = torch.cat((tokens, next_tokens) , dim=1 )
else:
logits[is_stopped] = -float(np.inf )
logits[is_stopped, 0] = 0
scores_sum = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
scores_sum_average = scores_sum / seq_lengths[:, None]
scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
next_tokens_source = next_tokens // scores_sum.shape[1]
seq_lengths = seq_lengths[next_tokens_source]
next_tokens = next_tokens % scores_sum.shape[1]
next_tokens = next_tokens.unsqueeze(1 )
tokens = tokens[next_tokens_source]
tokens = torch.cat((tokens, next_tokens) , dim=1 )
generated = generated[next_tokens_source]
scores = scores_sum_average * seq_lengths
is_stopped = is_stopped[next_tokens_source]
next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
generated = torch.cat((generated, next_token_embed) , dim=1 )
is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
if is_stopped.all():
break
scores = scores / seq_lengths
order = scores.argsort(descending=True )
# tokens tensors are already padded to max_seq_length
output_texts = [tokens[i] for i in order]
output_texts = torch.stack(output_texts , dim=0 )
seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
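# Minimal usage sketch for the decoder above (all dimensions and token ids below are
# hypothetical toy values, chosen so n_embd is divisible by n_head):
# decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=64,
#                                  prefix_hidden_dim=None, n_embd=64, n_layer=2, n_head=4)
# text_latents = torch.randn(2, 77, 64)  # one prefix per caption to generate
# tokens, lengths = decoder.generate_captions(text_latents, eos_token_id=50256, device="cpu")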
| 661
| 0
|
def odd_even_transposition(arr):
'''simple docstring'''
arr_size = len(arr )
for _ in range(arr_size ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
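# Brief example of the odd-even pass order (worked out by hand):
# odd_even_transposition([3, 1, 2]) compares (arr[0], arr[1]) on even passes and
# (arr[1], arr[2]) on odd passes, settling at [1, 2, 3] after at most len(arr) passes.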
if __name__ == "__main__":
arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_layoutlmv2_fast"] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_layoutlmv2"] = ['''LayoutLMv2FeatureExtractor''']
_import_structure["image_processing_layoutlmv2"] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_layoutlmv2"] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
from .processing_layoutlmv2 import LayoutLMv2Processor
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Layer,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 107
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_lowercase = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
def setUp( self ):
self.transformer_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
os.path.join(git_repo_path , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def tearDown( self ):
check_copies.TRANSFORMER_PATH = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
code = black.format_str(code , mode=mode )
fname = os.path.join(self.transformer_dir , 'new_code.py' )
with open(fname , 'w' , newline='\n' ) as f:
f.write(code )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=True )
with open(fname , 'r' ) as f:
self.assertTrue(f.read() , expected )
def test_find_code_in_transformers( self ):
code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(code , REFERENCE_CODE )
def test_is_copy_consistent( self ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , REFERENCE_CODE ) , )
# Copy consistency with a really long name
long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub('Bert' , long_class_name , REFERENCE_CODE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , REFERENCE_CODE , overwrite_result=re.sub('Bert' , 'TestModel' , REFERENCE_CODE ) , )
def test_convert_to_localized_md( self ):
localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list']
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme['format_model_list']
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme['format_model_list']
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 704
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'cls_token': '<s>'}
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
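    # Illustrative helper (added; not in the original test file): byte-level BPE
    # marks a leading space with "\u0120" (printed as "Ġ"), which is why the toy
    # vocab above contains entries like "\u0120low". A quick self-contained check:
    def demo_space_marker(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        tokens = tokenizer.tokenize('lower newer')
        # "newer" follows a space, so the marker token shows up after the first word
        assert any(t.startswith('\u0120') for t in tokens[1:])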
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('roberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 427
| 0
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""
    def __init__(self, path="", prefix="train"):
        """List the documents to summarize; files are not read into memory up front."""
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)
    def __len__(self) -> int:
        """Returns the number of documents."""
        return len(self.documents)
    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Splits a CNN/DailyMail story file into article lines and summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    """Truncates the sequence to block_size, or pads it with pad_token_id."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
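# Illustrative usage (added; not part of the original module): pad a short
# sequence to a block and derive its attention mask. The ids are arbitrary.
def _demo_pad_and_mask():
    pad_token_id = 0
    seq = truncate_or_pad([101, 7592, 102], block_size=6, pad_token_id=pad_token_id)
    # seq is now [101, 7592, 102, 0, 0, 0]
    mask = build_mask(torch.tensor(seq), pad_token_id)
    # mask is tensor([1, 1, 1, 0, 0, 0])
    return seq, mask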
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encodes story and summary lines and flattens them into single id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Alternates token type ids 0/1 between sentence separators, BERTSUM-style."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
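# Illustrative usage (added; not part of the original module): with separator
# id 102, token type ids alternate 0/1 per sentence segment.
def _demo_token_type_ids():
    batch = [[102, 5, 6, 102, 7, 8, 102, 9]]
    return compute_token_type_ids(batch, separator_token_id=102)
    # -> tensor([[0, 0, 0, 1, 1, 1, 0, 0]])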
| 477
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 477
| 1
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()
    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)
    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
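    # Back-of-envelope check of the ~150MB figure above (added note; not part of
    # the original test): 25M quantized params, with Adam keeping two moment
    # buffers per param.
    #   fp32 Adam:  25e6 * 8 bytes / 2**20 ~= 191 MiB  (two fp32 moments)
    #   8-bit Adam: 25e6 * 2 bytes / 2**20 ~=  48 MiB  (two int8 moments)
    # difference ~= 143 MiB, hence the 120MB threshold with some margin.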
    def run_trainer(self, max_len, model_name, num_train_epochs, learning_rate=3e-3, optim="adafactor", distributed=False, extra_args_str=None, eval_steps=0, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, n_gpus_to_use=None):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps)}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        args_eval = f"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps)}\n        ".split()
        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()
        return output_dir
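# Illustrative aside (added; not part of the original test file): the
# non-distributed branch above drives the example script by patching sys.argv.
# A minimal standalone version of the same pattern:
def _demo_argv_patching():
    import sys
    from unittest.mock import patch
    def fake_main():
        return sys.argv[1:]
    with patch.object(sys, "argv", ["prog", "--foo", "bar"]):
        assert fake_main() == ["--foo", "bar"]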
| 0
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Returns the shortest paths from vertex src to all other vertices."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
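# Illustrative usage (added; not part of the original script): a 3-vertex graph
# where the shortest path 0 -> 2 goes through vertex 1 (cost 1 + 2 = 3 < 10).
def _demo_bellman_ford():
    demo_graph = [
        {"src": 0, "dst": 1, "weight": 1},
        {"src": 1, "dst": 2, "weight": 2},
        {"src": 0, "dst": 2, "weight": 10},
    ]
    return bellman_ford(demo_graph, vertex_count=3, edge_count=3, src=0)
    # -> [0.0, 1, 3]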
| 0
| 1
|
from __future__ import annotations
from typing import Any
class Matrix:
    """A dense matrix supporting +, -, *, transpose and the Sherman-Morrison update."""
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self ) -> str:
return str(self )
    def validate_indicies(self, loc) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
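    # For reference (added note; not in the original file): the method above
    # implements the Sherman-Morrison identity
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # where `self` plays the role of A^(-1). `numerator_factor` is
    # 1 + v^T A^(-1) u, and the update is skipped when it is zero because
    # A + u v^T is then not invertible.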
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2() -> None:
        import doctest
        doctest.testmod()
    test1()
| 542
|
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
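# Quick sanity check (added; not in the original file): below 10 the multiples
# of 3 or 5 are 3, 5, 6 and 9, which sum to 23.
assert solution(10) == 23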
| 542
| 1
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        """Collates the audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap feature_size so mel-spectrogram targets are padded correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
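# Illustrative usage (added; not part of the original module). The checkpoint
# name below is the standard Microsoft TTS checkpoint, but loading it needs
# network access, so the example is left as a comment:
#
#     from transformers import SpeechT5Processor
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello world", return_tensors="pt")
#     # mutually exclusive inputs raise: processor(audio=wav, text="hi") -> ValueError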
| 260
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
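# Illustrative aside (added; not part of the original file): the same lazy-import
# effect can be sketched with PEP 562 module-level __getattr__, which _LazyModule
# implements in a more general, structure-driven way:
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "FocalNetModel":
#             return getattr(importlib.import_module(".modeling_focalnet", __package__), name)
#         raise AttributeError(name)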
| 260
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 108
|
def simplify(current_set):
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, saved_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations):
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 423
| 0
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """simple docstring"""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
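

# Sanity-check sketch (added; not part of the original module): the PMF over
# all success counts 0..trials must sum to 1 for any valid probability.
def pmf_total(trials: int, prob: float) -> float:
    return sum(binomial_distribution(k, trials, prob) for k in range(trials + 1))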
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
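

# Minimal illustrative sketch (added; not part of the test suite): top-k
# filtering alone can be written directly with tf.math.top_k. Assumes TF 2.x.
def naive_top_k_filter(logits, k):
    # Keep the k largest logits per row; set everything else to -inf.
    kth_largest = tf.math.top_k(logits, k=k).values[..., -1, None]
    return tf.where(logits < kth_largest, tf.fill(tf.shape(logits), -float("inf")), logits)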
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
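

# Illustrative extension (added; not part of the original file): a NAND gate
# composed from the AND gate above.
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))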
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
'''simple docstring'''
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead.")
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n")
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory.")
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , "w"):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: list):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("\"")[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("\"")[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """simple docstring"""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """simple docstring"""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """simple docstring"""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement)
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement)
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        elem1 = self.heap[node1_pos][0]
        elem2 = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[elem1] = node2_pos
        self.position_map[elem2] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """simple docstring"""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
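

# Illustrative usage (added; not part of the original file): build a small
# weighted graph and run the algorithm on it.
if __name__ == "__main__":
    demo_graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("a", "c", 15)
    distances, parents = prims_algo(demo_graph)
    print(distances)  # expected: {'a': 0, 'b': 3, 'c': 13}
    print(parents)    # expected: {'a': None, 'b': 'a', 'c': 'b'}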
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
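
    # Example invocation (illustrative only; the paths below are hypothetical):
    #   python convert_xlm_checkpoint.py \
    #       --xlm_checkpoint_path ./xlm_checkpoint.pth \
    #       --pytorch_dump_folder_path ./converted_model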
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
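
    # Illustrative usage (added; not part of the original file):
    demo = PrefixSum([1, 2, 3, 4])
    print(demo.get_sum(1, 3))      # 9 == 2 + 3 + 4
    print(demo.contains_sum(5))    # True, via the subarray [2, 3]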
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """simple docstring"""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """simple docstring"""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """simple docstring"""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
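
# Quick self-check (added for illustration; not part of the original file).
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []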
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
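
    # Illustrative usage (added; not part of the original file): solve for the
    # Casimir force between two 1 m^2 plates 1 micrometre apart.
    print(casimir_force(force=0, area=1.0, distance=1e-6))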
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
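
# --- Usage sketch (illustrative; not part of the original module) ---
# The "@@ " marker glues sub-word pieces back together when decoding, which is
# what convert_tokens_to_string above does. The token list here is made up:
#
#     tokens = ["hal@@", "lo", "wel@@", "t"]
#     string = " ".join(tokens)                      # "hal@@ lo wel@@ t"
#     "".join(string.split(BPE_TOKEN_VOCAB))         # -> "hallo welt"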
"""
Given an m x n matrix `grid` whose rows and columns are sorted in decreasing
order, count the number of negative numbers it contains (LeetCode 1351).
"""


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search the index of the first negative number in a decreasing array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
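
# --- Usage sketch (illustrative) ---
# All three counters agree on the small test grids defined above, e.g.:
#
#     >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
#     8
#     >>> count_negatives_brute_force([[3, 2], [1, 0]])
#     0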
"""
Project Euler problem 19: count the Sundays that fell on the first of the
month during the twentieth century (1 Jan 1901 to 31 Dec 2000).
"""


def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
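
# --- Cross-check sketch (illustrative; stdlib only) ---
# The same count can be verified directly with `datetime`, where weekday() == 6
# means Sunday:
#
#     from datetime import date
#
#     assert solution() == sum(
#         date(year, month, 1).weekday() == 6
#         for year in range(1901, 2001)
#         for month in range(1, 13)
#     )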
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
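
# --- Usage sketch (illustrative; reuses the toy vocab from the wordpiece test) ---
# WordPiece is greedy longest-match-first: continuation pieces carry a "##" prefix.
#
#     from transformers.models.bert.tokenization_bert import WordpieceTokenizer
#
#     vocab = {t: i for i, t in enumerate(["[UNK]", "want", "##want", "##ed", "runn", "##ing"])}
#     tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
#     tokenizer.tokenize("wantwanted running")
#     # -> ['want', '##want', '##ed', 'runn', '##ing']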
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    # CONNECTION_TIMES_OUT simulates requests that hang until they time out.
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    # CONNECTION_FAILS makes every request fail immediately.
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    # HF_DATASETS_OFFLINE_SET_TO_1 flips the library's offline flag instead of patching requests.
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")

        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
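
# --- Usage sketch (illustrative; `distilroberta-base` is the checkpoint the slow
# tests above exercise) ---
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#     unmasker("The largest city in France is <mask>")
#     # -> [{'sequence': 'The largest city in France is Paris', ...},
#     #     {'sequence': 'The largest city in France is Lyon', ...}]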
"""Convert a Hugging Face PyTorch BERT checkpoint to the original TensorFlow 1.x format."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            # Linear layers are stored transposed relative to the TF convention.
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
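
# --- Example invocation (script and file names are illustrative) ---
#
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./pytorch_model.bin \
#         --tf_cache_dir ./tf_checkpoint
#
# Given the replace("-", "_") above, the checkpoint would land at
# ./tf_checkpoint/bert_base_uncased.ckpt.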
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through the given points at x0
    using Neville's algorithm; returns the interpolated value together with
    the full table of intermediate computations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
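
# --- Usage sketch (illustrative) ---
# For points on the line y = x + 5, interpolating at x0 = 5 recovers 10:
#
#     >>> neville_interpolate((1, 2, 3, 4, 6), (6, 7, 8, 9, 11), 5)[0]
#     10.0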
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of the `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
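
# --- Illustrative marker this checker matches (object names are made up) ---
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock all-casing
#
# `_re_copy_warning` captures the indent, the dotted object path, and the optional
# `old->new` replace pattern; `all-casing` also applies the lower/upper-cased variants.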
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to each element in the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size += abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
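
# --- Example invocation (positional arguments; image path illustrative) ---
#
#     python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
#
# i.e. <image> <spatial variance> <intensity variance> <kernel size>; parse_args
# bumps an even kernel size to the next odd number so the kernel has a center.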
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
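
# --- Note (values worked out by hand from the edges above) ---
# show_min(1, 4) should return 11 (1 -> 3 costs 5, 3 -> 4 costs 6) and
# show_min(0, 3) should return 16 (0 -> 2 costs 9, 2 -> 3 costs 7); wrapping the
# calls above in print() would make the demo actually display them.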
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
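
# --- Usage sketch (arrays are made up) ---
#
#     import numpy as np
#
#     preds = np.array([0, 1, 1, 0])
#     labels = np.array([0, 1, 0, 0])
#     glue_compute_metrics("sst-2", preds, labels)  # -> {"acc": 0.75}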
"""
Gradient descent for minimizing the cost of a linear hypothesis function.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Error = hypothesis output - actual output for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """h(x) = theta0 + theta1*x0 + theta2*x1 + theta3*x2 with the current parameters."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
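
# --- Sanity sketch (arithmetic worked out from the initial parameters above) ---
# With parameter_vector = [2, 4, 1, 5], the first training input (5, 2, 3) gives
# h(x) = 2 + 4*5 + 1*2 + 5*3 = 39, versus the actual output 15, so the initial
# error on example 0 is 24; gradient descent shrinks such errors iteratively.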
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
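# Illustrative example with a hypothetical ParlAI key:
#     "encoder.layers.0.attention.q_lin.weight"
# is rewritten by PATTERNS ("attention" -> "attn", "q_lin" -> "q_proj") and the
# encoder rule (".attn" -> ".self_attn") above to
#     "encoder.layers.0.self_attn.q_proj.weight"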
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
SCREAMING_SNAKE_CASE : Any = ["START"]
@torch.no_grad()
def _UpperCamelCase ( lowerCAmelCase__: Dict ,lowerCAmelCase__: List[str] ,lowerCAmelCase__: int ) -> str:
SCREAMING_SNAKE_CASE_ = torch.load(lowerCAmelCase__ ,map_location='cpu' )
SCREAMING_SNAKE_CASE_ = model['model']
SCREAMING_SNAKE_CASE_ = BlenderbotConfig.from_json_file(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = BlenderbotForConditionalGeneration(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = m.model.state_dict().keys()
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
SCREAMING_SNAKE_CASE_ = rename_state_dict_key(lowerCAmelCase__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
SCREAMING_SNAKE_CASE_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(lowerCAmelCase__ )
m.model.load_state_dict(lowerCAmelCase__ ,strict=lowerCAmelCase__ )
m.half()
m.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
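# Example invocation (paths are placeholders; assumes this script is saved as
# convert_blenderbot_checkpoint.py):
#     python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json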
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BART tokenizer (backed by HuggingFace's *tokenizers* library),
    using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
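# Minimal usage sketch (downloads tokenizer files from the Hugging Face Hub):
#
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     enc = tokenizer("Hello world")
#     # A single sequence is wrapped as <s> ... </s>, i.e.
#     # build_inputs_with_special_tokens(ids) == [bos_token_id] + ids + [eos_token_id]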
import os
def lowercase ( _a = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(_a ) ,_a ) ) as in_file:
UpperCAmelCase_: str = in_file.read()
UpperCAmelCase_: Union[str, Any] = [[int(_a ) for cell in row.split("," )] for row in data.strip().splitlines()]
UpperCAmelCase_: List[Any] = [[0 for cell in row] for row in grid]
UpperCAmelCase_: Any = len(grid[0] )
UpperCAmelCase_: int = [[0 for i in range(_a )] for j in range(_a )]
UpperCAmelCase_: int = grid[0][0]
for i in range(1 ,_a ):
UpperCAmelCase_: List[Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 ,_a ):
UpperCAmelCase_: Any = grid[i][0] + dp[i - 1][0]
for i in range(1 ,_a ):
for j in range(1 ,_a ):
UpperCAmelCase_: Union[str, Any] = grid[i][j] + min(dp[i - 1][j] ,dp[i][j - 1] )
return dp[-1][-1]
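# Illustrative check on a small grid (not from matrix.txt): for
#     [[1, 2],
#      [3, 4]]
# the minimal right/down path is 1 -> 2 -> 4 with sum 7.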
if __name__ == "__main__":
print(F"""{solution() = }""")
def solution(n: int = 2000000) -> int:
    """
    Returns the sum of all the primes below n, using a sieve of Eratosthenes
    (0 marks a prime candidate, 1 marks a composite).
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
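# Sanity check (illustrative): the primes below 10 are 2, 3, 5 and 7, so
# solution(10) should return 17.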
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers that add
    up to `target`, or an empty list if no such pair exists.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
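# Note: the two-pointer scan assumes `nums` is sorted in ascending order; it runs in
# O(n) time and O(1) extra space.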
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Returns True if `pattern` occurs in `text`, using Rabin-Karp rolling-hash
    matching: compare hashes of a sliding window against the pattern hash, and
    only compare strings directly when the hashes collide.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
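# The rolling hash gives O(p_len + t_len) expected running time; hash collisions are
# resolved by the direct string comparison above, so reported matches are exact.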
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
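# Minimal usage sketch (constructs a config locally, no download required):
#
#     config = DetrConfig(num_queries=50)
#     config.hidden_size  # -> 256, exposed as an alias of d_model via the
#                         #    hidden_size property / attribute_map above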
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads the given file as bytes and returns them as one long bit string.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """
    Adds the new strings (curr_string + "0", curr_string + "1") to the lexicon.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """
    Compresses the given bit string using the Lempel-Ziv algorithm.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """
    Prepends the source file's length (Elias gamma coded) to the compressed string.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes the given bit string (only 0's and 1's) as bytes into the file.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, compresses it and writes the result to the destination.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
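# Example invocation (file names are placeholders; assumes this module is saved as
# lempel_ziv.py):
#     python lempel_ziv.py uncompressed.bin compressed.lz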
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
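    # The 1e-5 bound above is the conventional tolerance for comparing model outputs
    # before and after a save/load round-trip; reloaded TF weights are expected to
    # reproduce the original logits up to float32 noise.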
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
def odd_even_transposition(arr: list) -> list:
    """
    Sorts `arr` in place with odd-even transposition (brick) sort and returns it.
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
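# Odd-even transposition sort is a parallel-friendly variant of bubble sort; run
# serially, as here, it still performs O(n^2) comparisons in the worst case.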
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
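# Illustrative mapping: for a dataset whose columns are already named "question",
# "context" and "answers", column_mapping is the identity
#     {"question": "question", "context": "context", "answers": "answers"}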
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
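# The _LazyModule indirection registered above defers the heavy torch/flax imports
# until an attribute of this module is first accessed, keeping a plain import of the
# package cheap.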
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
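# Minimal usage sketch:
#
#     g = DirectedGraph()
#     g.add_pair(1, 2)
#     g.add_pair(2, 3)
#     g.dfs(1, 3)      # -> [1, 2, 3]
#     g.in_degree(3)   # -> 1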
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to max_images full-resolution results of a Google image search."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
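# Illustrative sketch (editor's addition): for "xclip-base-patch32" the two characters
# after "patch" parse to patch_size=32 and the base-sized configs are kept, e.g.
#
#     config = get_xclip_config("xclip-base-patch32", num_frames=8)
#     assert config.vision_config.patch_size == 32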
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
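# Example of the renaming above (editor's addition, derived from the rules in rename_key):
#
#     rename_key("transformer.resblocks.0.ln_1.weight")
#     # -> "text_model.encoder.layers.0.layer_norm1.weight"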
def convert_state_dict(orig_state_dict, config):
    # Target key names below follow the HF X-CLIP module layout reconstructed from the
    # slicing logic; the fused in_proj tensor is split into separate q/k/v projections.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
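# Editor's note: the original checkpoint stores each attention's projections as one fused
# in_proj tensor of shape (3 * dim, dim); the slices val[:dim], val[dim : dim * 2] and
# val[-dim:] above peel it apart into the separate q/k/v projection weights (or biases).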
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original X-CLIP checkpoint weights into the HF XCLIPModel structure."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
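# Example (editor's addition): should_ignore("decoder.model.5.extra.bias", ["decoder.*.extra"])
# returns True because both the prefix "decoder" and the suffix "extra" occur in the name.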
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # note: the original used `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy; the membership test below is what was intended
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
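# Example of the wildcard mapping above (editor's addition): for the original name
# "quantizer.vq.layers.0._codebook.embed", the pattern "quantizer.vq.layers.*._codebook.embed"
# matches with layer_index "0", so the value lands in "quantizer.layers.0.codebook.embed".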
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
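# Hypothetical invocation (editor's illustration; the script name is a placeholder and the
# checkpoint file is the 24 kHz file listed in the header comments above):
#
#     python convert_encodec_checkpoint.py --model encodec_24khz \
#         --checkpoint_path encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec-24khz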
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
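# Illustrative input/output (editor's addition): for a class-info JSON such as
# {"0": {"name": "wall", "isthing": 0}, "1": {"name": "person", "isthing": 1}},
# prepare_metadata returns
# {"0": "wall", "1": "person", "thing_ids": [1], "class_names": ["wall", "person"]}.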
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
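    # Example (editor's addition): with shortest_edge=32, a 400x300 (w x h) PIL image
    # resizes to expected_height=32 and expected_width=int(32 * 400 / 300) = 42.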
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty contiguous subsequences (Kadane's algorithm)."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
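# Minimal sanity check (editor's addition; the input values are illustrative): the best
# contiguous run of [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1] with sum 6.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6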
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law, solved for pressure: P = nRT / V (R = 0.0821 L*atm/(mol*K))."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law, solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law, solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
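# Worked example (editor's addition; numbers are illustrative): 3 mol of an ideal gas
# in 0.82 L at 300 K gives P = nRT / V = 3 * 0.0821 * 300 / 0.82, roughly 90 atm.
assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90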
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE .pt checkpoint to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path where the converted VAE will be saved.")

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
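
# NOTE (illustrative usage, not part of the original file): a typical invocation
# of this converter; the script name and both paths are placeholders.
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.pt \
#       --dump_path ./converted_vae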
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :int = ["""pixel_values"""]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_5_5 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :int = size if size is not None else {'shortest_edge': 2_5_6}
lowerCAmelCase__ :Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
lowerCAmelCase__ :str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase__ :Optional[int] = get_size_dict(__UpperCAmelCase , param_name='crop_size' )
lowerCAmelCase__ :Optional[Any] = do_resize
lowerCAmelCase__ :Dict = size
lowerCAmelCase__ :Tuple = resample
lowerCAmelCase__ :Any = do_center_crop
lowerCAmelCase__ :Tuple = crop_size
lowerCAmelCase__ :List[Any] = do_rescale
lowerCAmelCase__ :Any = rescale_factor
lowerCAmelCase__ :Tuple = do_normalize
lowerCAmelCase__ :Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ :List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCAmelCase__ :Any = get_resize_output_image_size(__UpperCAmelCase , size=size['shortest_edge'] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(__UpperCAmelCase , size=(size['height'], size['width']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ :int = size if size is not None else self.size
lowerCAmelCase__ :Optional[Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = resample if resample is not None else self.resample
lowerCAmelCase__ :List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ :List[str] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ :Union[str, Any] = get_size_dict(__UpperCAmelCase , param_name='crop_size' )
lowerCAmelCase__ :List[str] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ :List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ :Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ :Optional[int] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ :List[str] = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase__ :Optional[int] = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
lowerCAmelCase__ :Tuple = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ :Union[str, Any] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ :Union[str, Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ :List[str] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
lowerCAmelCase__ :Dict = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
lowerCAmelCase__ :List[str] = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = target_sizes.numpy()
lowerCAmelCase__ :List[str] = []
for idx in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ :Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCAmelCase )
else:
lowerCAmelCase__ :List[str] = logits.argmax(dim=1 )
lowerCAmelCase__ :Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
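
# NOTE (illustrative sketch, not part of the original file): the processor above
# implements the usual resize -> center-crop -> rescale -> normalize pipeline,
# and its final method maps segmentation logits back to per-pixel label maps.
# Assuming `processor` is an instance of the class defined above and `outputs`
# holds the logits of a matching segmentation model, usage looks roughly like:
#
#   from PIL import Image
#   image = Image.open("example.jpg")               # placeholder path
#   batch = processor(image, return_tensors="pt")   # -> {"pixel_values": ...}
#   # map the logits back to the original resolution (height, width):
#   seg_maps = processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )  # method name assumed; it corresponds to the last method above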
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ :
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Dict=2 , _A : Optional[Any]=32 , _A : List[str]=16 , _A : str=3 , _A : List[Any]=True , _A : Optional[int]=True , _A : Optional[int]=32 , _A : Optional[Any]=4 , _A : Optional[int]=[0, 1, 2, 3] , _A : List[Any]=4 , _A : Optional[int]=37 , _A : Optional[Any]="gelu" , _A : Optional[Any]=0.1 , _A : Union[str, Any]=0.1 , _A : Dict=0.02 , _A : Optional[Any]=3 , _A : Union[str, Any]=[1, 3_84, 24, 24] , _A : int=True , _A : int=None , ) -> Tuple:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : int = backbone_out_indices
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = backbone_featmap_shape
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase_ : List[str] = num_patches + 1
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ : Dict = self.get_config()
return config, pixel_values, labels
def A ( self : int ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_A , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : Union[str, Any] , _A : List[Any] , _A : Optional[int] , _A : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = DPTModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , _A : str , _A : Tuple , _A : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Any = DPTForDepthEstimation(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : int = model(_A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : List[str] , _A : str , _A : Union[str, Any] , _A : int ) -> Dict:
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : Optional[Any] = DPTForSemanticSegmentation(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = model(_A , labels=_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs
UpperCAmelCase_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a_ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
def A ( self : Tuple ) -> Tuple:
UpperCAmelCase_ : Any = DPTModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def A ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def A ( self : Union[str, Any] ) -> str:
pass
def A ( self : str ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def A ( self : List[Any] ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(_A )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def A ( self : List[str] ) -> str:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_A )
def A ( self : str ) -> List[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
def A ( self : Tuple ) -> str:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = True
if model_class in get_values(_A ):
continue
UpperCAmelCase_ : Optional[int] = model_class(_A )
model.to(_A )
model.train()
UpperCAmelCase_ : Any = self._prepare_for_class(_A , _A , return_labels=_A )
UpperCAmelCase_ : Tuple = model(**_A ).loss
loss.backward()
def A ( self : Union[str, Any] ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[Any] = True
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
UpperCAmelCase_ : int = model_class(_A )
model.to(_A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase_ : int = self._prepare_for_class(_A , _A , return_labels=_A )
UpperCAmelCase_ : List[str] = model(**_A ).loss
loss.backward()
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(_A )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=_A )
# Skip the check for the backbone
UpperCAmelCase_ : List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCAmelCase_ : Union[str, Any] = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : Tuple ) -> Optional[Any]:
pass
@slow
def A ( self : Any ) -> int:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCAmelCase_ : Any = DPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def A ( self : Union[str, Any] ) -> str:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = '''add'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[Any] = DPTForDepthEstimation(_A )
def __UpperCAmelCase ( ) -> List[Any]:
UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : List[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCAmelCase_ : Any = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_A )
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : Union[str, Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : str = model(**_A )
UpperCAmelCase_ : List[Any] = outputs.predicted_depth
# verify the predicted depth
UpperCAmelCase_ : str = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , _A )
UpperCAmelCase_ : Tuple = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_A )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , _A , atol=1e-4 ) )
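
# NOTE (illustrative sketch, not part of the original file): the integration test
# above, reduced to a standalone depth-estimation snippet. The checkpoint name
# and image fixture come from the test; the rest is assumed boilerplate.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import DPTForDepthEstimation, DPTImageProcessor

    processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
    model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        predicted_depth = model(**inputs).predicted_depth
    print(predicted_depth.shape)  # expected: torch.Size([1, 384, 384])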
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ : List[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
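
# NOTE (illustrative sketch, not part of the original file): the same checkpoint,
# used outside the test harness to print the top-5 ImageNet classes for an
# arbitrary image (the image path below is a placeholder).
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

    processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
    model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
    image = Image.open("cat.jpg")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    for idx in logits.topk(5).indices[0].tolist():
        print(model.config.id2label[idx])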
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
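
# NOTE (illustrative usage, not part of the original file): the classic example
# [2, 3, -2, 4] has maximum product subarray [2, 3] -> 6.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0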
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = "▁"
_lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
_lowerCAmelCase = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_lowerCAmelCase = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self : int , _A : Optional[Any] , _A : Union[str, Any]="<s>" , _A : List[Any]="</s>" , _A : Tuple="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[Any]="<pad>" , _A : Optional[Any]="<mask>" , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase = 1
_UpperCamelCase = len(self.sp_model ) + self.fairseq_offset
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
_UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , _A : Optional[int] ):
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def UpperCamelCase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self : int ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : Optional[int] , _A : str ):
return self.sp_model.encode(_A , out_type=_A )
def UpperCamelCase_ ( self : Dict , _A : Optional[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase_ ( self : Dict , _A : str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def UpperCamelCase_ ( self : Any , _A : int ):
        # Join the sub-tokens and turn the SentencePiece underline marker ("▁")
        # back into plain spaces.
        out_string = "".join(_A ).replace("▁" , " " ).strip()
        return out_string
def UpperCamelCase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
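
# NOTE (illustrative sketch, not part of the original file): this tokenizer keeps
# fairseq's token-id layout on top of the SentencePiece vocab via a fixed offset
# of 1, plus the four pinned specials shown in the alignment table above.
# Typical round-trip usage:
#
#   from transformers import XLMRobertaTokenizer
#
#   tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tokenizer("Hello world!").input_ids   # starts with 0 (<s>), ends with 2 (</s>)
#   text = tokenizer.decode(ids, skip_special_tokens=True)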
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two equal-length vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in `value_array`, find its nearest neighbour in `dataset`
    (by Euclidean distance) and return the [neighbour, distance] pairs."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine of the angle between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
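
# NOTE (illustrative usage, not part of the original file): nearest-neighbour
# lookup of one query vector against a tiny dataset.
if __name__ == "__main__":
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.0, 1.0]])
    print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 1.0]]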
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
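
# NOTE (illustrative sketch, not part of the original file): the _LazyModule
# pattern above defers heavy imports until an attribute is first accessed, so
#
#   from transformers.models.bert import BertConfig   # cheap: config module only
#   from transformers.models.bert import BertModel    # torch is imported here
#
# only pays the import cost of the backends (torch/tf/flax) actually used.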
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_: str = ReformerTokenizer
a_: Any = ReformerTokenizerFast
a_: Union[str, Any] = True
a_: int = False
a_: List[Any] = True
def lowerCAmelCase__ ( self : str ):
super().setUp()
_lowerCAmelCase =ReformerTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Any ):
_lowerCAmelCase ="""<s>"""
_lowerCAmelCase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowerCAmelCase__ ( self : str ):
_lowerCAmelCase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowerCamelCase_ ) , 1000 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase__ ( self : Tuple ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase ="""I was born in 92000, and this is falsé."""
_lowerCAmelCase =tokenizer.tokenize(lowerCamelCase_ )
_lowerCAmelCase =rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase =tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_lowerCAmelCase =rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase =tokenizer.encode(lowerCamelCase_ )
_lowerCAmelCase =rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase_ : Optional[int]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase =self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# Simple input
_lowerCAmelCase ="""This is a simple input"""
_lowerCAmelCase =["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase =("""This is a simple input""", """This is a pair""")
_lowerCAmelCase =[
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )
def lowerCAmelCase__ ( self : Dict ):
pass
def lowerCAmelCase__ ( self : int ):
_lowerCAmelCase =ReformerTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
_lowerCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [285, 46, 10, 170, 382] , )
_lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCAmelCase__ ( self : Tuple ):
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def lowerCAmelCase__ ( self : int ):
_lowerCAmelCase ="""Hello World!"""
_lowerCAmelCase =[126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self : List[Any] ):
_lowerCAmelCase =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_lowerCAmelCase =[
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def lowerCAmelCase__ ( self : Optional[int] ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_lowerCAmelCase =list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowerCAmelCase =""" """.join(lowerCamelCase_ )
_lowerCAmelCase =self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors="""pt""" )
_lowerCAmelCase =self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
_lowerCAmelCase =ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_lowerCAmelCase =encoded_sequence["""input_ids"""].shape
_lowerCAmelCase =ReformerModel(lowerCamelCase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def lowerCAmelCase__ ( self : Dict ):
# fmt: off
_lowerCAmelCase ={"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_lowerCAmelCase =[
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=lowerCamelCase_ , sequences=lowerCamelCase_ , )
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LevitImageProcessingTester(self)
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , '''image_mean'''))
        self.assertTrue(hasattr(image_processing , '''image_std'''))
        self.assertTrue(hasattr(image_processing , '''do_normalize'''))
        self.assertTrue(hasattr(image_processing , '''do_resize'''))
        self.assertTrue(hasattr(image_processing , '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing , '''size'''))
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18})
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self ):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_55 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37)
    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason='''UperNet does not use inputs_embeds''')
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''UperNet does not support input and output embeddings''')
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_from_base( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_to_base( self ):
        '''simple docstring'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def test_multi_gpu_data_parallel_forward( self ):
        '''simple docstring'''
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small( self ):
        '''simple docstring'''
        pass
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_initialization( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @unittest.skip(reason='''UperNet does not have tied weights''')
    def test_tied_model_weights_key_ignore( self ):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
    def test_inference_swin_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4))
    def test_inference_convnext_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4))
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_video_classification( self , config , pixel_values , labels ):
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='TimeSformer does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_video_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
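# The .npy fixture presumably stores a stack of RGB frames; returning list(video)
# yields a list of per-frame arrays, which is the input format the video image
# processor below consumes.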
@require_torch
@require_vision
class TimesformerModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification( self ):
        model = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 4_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop('sampling_rate' , None )
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
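# model_input_names above merges the tokenizer's and feature extractor's expected
# keys (e.g. input_ids, attention_mask, input_features); dict.fromkeys both
# deduplicates them and preserves their order.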
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 2
    @register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 100 , s_noise: float = 1.007 , s_churn: float = 80 , s_min: float = 0.05 , s_max: float = 50 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ):
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
    def add_noise_to_input( self , sample: torch.FloatTensor , sigma: float , generator: Optional[torch.Generator] = None ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , return_dict: bool = True , ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def step_correct( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , sample_prev: torch.FloatTensor , derivative: torch.FloatTensor , return_dict: bool = True , ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps ):
        raise NotImplementedError()
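# Note on the schedule built in set_timesteps: each entry equals
# sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)),
# i.e. a geometric interpolation between sigma_max**2 and sigma_min**2 over the
# reversed timestep indices.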
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
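# Thin CLI wrapper around TensorFlowBenchmark: parse the dataclass arguments,
# translate deprecated --no_* flags into an actionable error message, then run.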
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        # the error message ends with a printable list of the offending flags
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester( unittest.TestCase ):
    def get_tokenizer( self , mname ):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
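    # model.half() above runs GPU inference in fp16 to cut memory use; on CPU the
    # model stays in fp32.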
@parameterized.expand(
[
['''en-ru''', 26.0],
['''ru-en''', 22.0],
['''en-de''', 22.0],
['''de-en''', 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        """simple docstring"""
        mname = F"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['''src''']
        tgt_sentences = bleu_data[pair]['''tgt''']
        batch = tokenizer(src_sentences , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['''bleu'''] , min_bleu_score )
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels , f1_avg="binary" ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels ):
    """simple docstring"""
    question_map = {}
    # group (prediction, label) pairs by their paragraph-question id
    for id_pred, label in zip(ids_preds , labels ):
        question_id = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute( self , predictions , references ):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='''macro''' )
        elif self.config_name == "record":
            dataset = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
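# The _LazyModule indirection defers importing the torch-backed symbols until
# they are first accessed, which keeps the top-level package import cheap.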
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9_500 , num_object_labels=1_600 , num_attr_labels=400 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2_048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs )
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    bwt_string: str
    idx_original_string: int
def all_rotations( s: str ) -> list[str]:
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform( s: str ) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt( bwt_string: str , idx_original_string: int ) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
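# A quick round trip for intuition: bwt_transform("^BANANA") returns
# {"bwt_string": "BNN^AAA", "idx_original_string": 6}, and reverse_bwt("BNN^AAA", 6)
# rebuilds "^BANANA" by prepending the BWT column and re-sorting len(s) times.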
if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"""Burrows Wheeler transform for string '{s}' results """
        f"""in '{result['bwt_string']}'"""
    )
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
f"""we get original string '{original_string}'"""
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase : Any =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = '''data2vec-vision'''
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=2_24 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_55 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-4
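# atol_for_validation loosens the ONNX export check: exported and eager outputs
# only need to agree within 1e-4.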
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__lowerCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import requests
giphy_api_key = "YOUR API KEY"
# Get the key from https://developers.giphy.com/
def get_gifs( query: str , api_key: str = giphy_api_key ) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split() )
    url = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
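# Example (needs a real key): get_gifs("space ship") returns the giphy.com page
# URLs of the top search hits for that query.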
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCAmelCase_ ( __lowerCamelCase ):
if is_torch_version("<" , "2.0.0" ) or not hasattr(__lowerCamelCase , "_dynamo" ):
return False
return isinstance(__lowerCamelCase , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed (DDP/DataParallel/DeepSpeed) and torch.compile containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save `obj` to file `f`, using `xm.save` on TPU and saving only from the main local process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily export the (upper-cased) keyword arguments as environment variables, removing them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
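# Usage sketch:
#   with patch_environment(master_port="29501"):
#       assert os.environ["MASTER_PORT"] == "29501"
#   # MASTER_PORT is removed again once the block exits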
def get_pretty_name(obj):
    """Return a human-readable name for `obj`: its qualified name, its plain name, or `str(obj)` as a fallback."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
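# Examples: get_pretty_name(dict) == "dict"; an instance without __name__ falls back
# to its class, e.g. get_pretty_name(collections.OrderedDict()) == "OrderedDict".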
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, descending into any nested dictionaries."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
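# Example: merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
# returns {"a": {"c": 2, "b": 1}, "d": 3} (nested keys are merged, not overwritten).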
def is_port_in_use(port: int = None) -> bool:
    """Return True if `port` (default 29500, torch.distributed's default) is already in use on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 203
| 0
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format `t` (in seconds) to (h):mm:ss."""
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    """HTML code for a progress bar `value`/`total` with `prefix` to the left and `label` to the right."""
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    """Put the texts in `items` in an HTML table, using the first row as the header."""
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
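# Example: text_to_html_table([["Step", "Training Loss"], [10, 0.123456]]) renders a
# two-column table with one data row; float cells are formatted with six decimals.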
class __A :
a__ : Dict = 5
a__ : Any = 0.2
def __init__(self : Optional[Any] , __a : int , __a : Optional[str] = None , __a : bool = True , __a : Optional["NotebookTrainingTracker"] = None , __a : int = 300 , ):
UpperCAmelCase_ = total
UpperCAmelCase_ = "" if prefix is None else prefix
UpperCAmelCase_ = leave
UpperCAmelCase_ = parent
UpperCAmelCase_ = width
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def _lowercase (self : str , __a : int , __a : bool = False , __a : str = None ):
UpperCAmelCase_ = value
if comment is not None:
UpperCAmelCase_ = comment
if self.last_value is None:
UpperCAmelCase_ = UpperCAmelCase_ = time.time()
UpperCAmelCase_ = UpperCAmelCase_ = value
UpperCAmelCase_ = UpperCAmelCase_ = None
UpperCAmelCase_ = self.warmup
UpperCAmelCase_ = 1
self.update_bar(__a )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
UpperCAmelCase_ = self.elapsed_time / (value - self.start_value)
else:
UpperCAmelCase_ = None
if value >= self.total:
UpperCAmelCase_ = self.total
UpperCAmelCase_ = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
UpperCAmelCase_ = self.average_time_per_item * (self.total - value)
self.update_bar(__a )
UpperCAmelCase_ = value
UpperCAmelCase_ = current_time
if self.average_time_per_item is None:
UpperCAmelCase_ = 1
else:
UpperCAmelCase_ = max(int(self.update_every / self.average_time_per_item ) , 1 )
def _lowercase (self : Optional[int] , __a : Tuple , __a : List[Any]=None ):
UpperCAmelCase_ = " " * (len(str(self.total ) ) - len(str(__a ) )) + str(__a )
if self.elapsed_time is None:
UpperCAmelCase_ = f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
UpperCAmelCase_ = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
UpperCAmelCase_ = (
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
f""" {format_time(self.predicted_remaining )}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
self.display()
def _lowercase (self : Dict ):
UpperCAmelCase_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
UpperCAmelCase_ = disp.display(disp.HTML(self.html_code ) , display_id=__a )
else:
self.output.update(disp.HTML(self.html_code ) )
def _lowercase (self : Optional[Any] ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
class __A ( UpperCamelCase__ ):
def __init__(self : List[Any] , __a : List[Any] , __a : Dict=None ):
super().__init__(__a )
UpperCAmelCase_ = None if column_names is None else [column_names]
UpperCAmelCase_ = None
def _lowercase (self : str ):
UpperCAmelCase_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
UpperCAmelCase_ = disp.display(disp.HTML(self.html_code ) , display_id=__a )
else:
self.output.update(disp.HTML(self.html_code ) )
def _lowercase (self : List[str] , __a : Union[str, Any] ):
if self.inner_table is None:
UpperCAmelCase_ = [list(values.keys() ), list(values.values() )]
else:
UpperCAmelCase_ = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__a )
UpperCAmelCase_ = columns
self.inner_table.append([values[c] for c in columns] )
def _lowercase (self : Dict , __a : Optional[int] , __a : int=None , __a : int=300 ):
UpperCAmelCase_ = NotebookProgressBar(__a , prefix=__a , parent=self , width=__a )
return self.child_bar
def _lowercase (self : Dict ):
UpperCAmelCase_ = None
self.display()
class __A ( UpperCamelCase__ ):
def __init__(self : Dict ):
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = False
def _lowercase (self : int , __a : Optional[Any] , __a : Any , __a : List[str] , **__a : Union[str, Any] ):
UpperCAmelCase_ = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss" )
UpperCAmelCase_ = NotebookTrainingTracker(state.max_steps , __a )
def _lowercase (self : Union[str, Any] , __a : str , __a : Dict , __a : Dict , **__a : List[Any] ):
UpperCAmelCase_ = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
UpperCAmelCase_ = False
def _lowercase (self : List[Any] , __a : str , __a : Optional[Any] , __a : Optional[int] , __a : Tuple=None , **__a : List[str] ):
if not has_length(__a ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
UpperCAmelCase_ = self.training_tracker.add_child(len(__a ) )
else:
UpperCAmelCase_ = NotebookProgressBar(len(__a ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _lowercase (self : int , __a : List[str] , __a : Union[str, Any] , __a : Optional[int] , **__a : Tuple ):
if self.prediction_bar is not None:
self.prediction_bar.close()
UpperCAmelCase_ = None
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Any , __a : List[Any] , __a : Tuple=None , **__a : List[Any] ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
UpperCAmelCase_ = {"Training Loss": logs["loss"]}
        # First column is necessarily Step since we're not in epoch eval strategy
UpperCAmelCase_ = state.global_step
self.training_tracker.write_line(__a )
def _lowercase (self : Optional[int] , __a : str , __a : Tuple , __a : Optional[int] , __a : Union[str, Any]=None , **__a : List[Any] ):
if self.training_tracker is not None:
UpperCAmelCase_ = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history ):
if "loss" in log:
UpperCAmelCase_ = log["loss"]
break
if self.first_column == "Epoch":
UpperCAmelCase_ = int(state.epoch )
else:
UpperCAmelCase_ = state.global_step
UpperCAmelCase_ = "eval"
for k in metrics:
if k.endswith("_loss" ):
UpperCAmelCase_ = re.sub(r"\_loss$" , "" , __a )
UpperCAmelCase_ = metrics.pop("total_flos" , __a )
UpperCAmelCase_ = metrics.pop("epoch" , __a )
UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_runtime""" , __a )
UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , __a )
UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , __a )
UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , __a )
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
UpperCAmelCase_ = v
else:
UpperCAmelCase_ = k.split("_" )
UpperCAmelCase_ = " ".join([part.capitalize() for part in splits[1:]] )
UpperCAmelCase_ = v
self.training_tracker.write_line(__a )
self.training_tracker.remove_child()
UpperCAmelCase_ = None
# Evaluation takes a long time so we should force the next update.
UpperCAmelCase_ = True
def _lowercase (self : Dict , __a : Dict , __a : Any , __a : Optional[Any] , **__a : Union[str, Any] ):
self.training_tracker.update(
state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=__a )
UpperCAmelCase_ = None
| 78
|
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
    """simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()]) + "\n"
| 107
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : int = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__lowerCAmelCase : Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__lowerCAmelCase : Optional[Any] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__lowerCAmelCase : str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__lowerCAmelCase : str = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__lowerCAmelCase : List[Any] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class UpperCAmelCase_ ( __snake_case ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCAmelCase_ ( __snake_case ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__lowerCAmelCase : str = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__lowerCAmelCase : Union[str, Any] = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(__snake_case )
class UpperCAmelCase_ :
'''simple docstring'''
def __call__( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : Tuple = None , UpperCamelCase__ : Union[str, Any] = False , UpperCamelCase__ : Union[str, Any] = False , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : Dict = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Any , ) -> Optional[int]:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
__magic_name__ = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
__magic_name__ = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
__magic_name__ = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
__magic_name__ = len(__UpperCamelCase )
__magic_name__ = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
F'''There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.''' )
__magic_name__ = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["""input_ids"""]
__magic_name__ = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["""input_ids"""]
__magic_name__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
__magic_name__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__magic_name__ = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] = 16 , UpperCamelCase__ : List[Any] = 64 , UpperCamelCase__ : Any = 4 , ) -> Tuple:
"""simple docstring"""
__magic_name__ = reader_input["""input_ids"""]
__magic_name__ , __magic_name__ , __magic_name__ = reader_output[:3]
__magic_name__ = len(__UpperCamelCase )
__magic_name__ = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
__magic_name__ = []
for doc_id in sorted_docs:
__magic_name__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__magic_name__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__magic_name__ = sequence_ids.index(self.pad_token_id )
else:
__magic_name__ = len(__UpperCamelCase )
__magic_name__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowercase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , ) -> List[Any]:
"""simple docstring"""
__magic_name__ = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__magic_name__ = sorted(__UpperCamelCase , key=lambda UpperCamelCase__ : x[1] , reverse=__UpperCamelCase )
__magic_name__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
__magic_name__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__snake_case )
class UpperCAmelCase_ ( __snake_case , __snake_case ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = READER_PRETRAINED_VOCAB_FILES_MAP
a__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = READER_PRETRAINED_INIT_CONFIGURATION
a__ = ["""input_ids""", """attention_mask"""]
| 702
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_canine'] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 76
| 0
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
SCREAMING_SNAKE_CASE__ : str = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
SCREAMING_SNAKE_CASE__ : str = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
SCREAMING_SNAKE_CASE__ : int = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
SCREAMING_SNAKE_CASE__ : str = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
SCREAMING_SNAKE_CASE__ : int = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
SCREAMING_SNAKE_CASE__ : str = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
SCREAMING_SNAKE_CASE__ : Tuple = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 85
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
    @parameterized.expand(
        [
            ['en-ru', 26.0],
            ['ru-en', 22.0],
            ['en-de', 22.0],
            ['de-en', 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
| 85
| 1
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
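# Sanity check from the problem statement: the prime factors of 13195 are
# 5, 7, 13 and 29, so solution(13195) == 29.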
| 134
|
"""simple docstring"""
import base64
def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to base64 bytes."""
    return base64.b64encode(string.encode('utf-8'))
def base64_decode(encoded: bytes) -> str:
    """Decode base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode('utf-8')
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
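# Expected output:
#   b'SGVsbG8gV29ybGQh'
#   Hello World!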
| 134
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __a (unittest.TestCase):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : str = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : List[str] = do_normalize
SCREAMING_SNAKE_CASE__ : Dict = image_mean
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_std
def _a ( self ) -> Tuple:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = DPTImageProcessor if is_vision_available() else None
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = DPTImageProcessingTester(self )
@property
def _a ( self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , """image_mean""" ) )
self.assertTrue(hasattr(A_ , """image_std""" ) )
self.assertTrue(hasattr(A_ , """do_normalize""" ) )
self.assertTrue(hasattr(A_ , """do_resize""" ) )
self.assertTrue(hasattr(A_ , """size""" ) )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(A_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Dict = image_processing(A_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(A_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 680
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 281
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowercase :
def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : Any=10 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Any=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Union[str, Any]=10 , __lowerCAmelCase : Union[str, Any]=0.0_2 , __lowerCAmelCase : Any="divided_space_time" , __lowerCAmelCase : Union[str, Any]=None , ) -> str:
"""simple docstring"""
a = parent
a = batch_size
a = image_size
a = num_channels
a = patch_size
a = num_frames
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = attention_type
a = initializer_range
a = scope
a = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
a = (image_size // patch_size) ** 2
a = (num_frames) * self.num_patches_per_frame + 1
def A ( self : str ) -> int:
"""simple docstring"""
a = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def A ( self : List[Any] ) -> List[str]:
"""simple docstring"""
a = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
a = self.num_labels
return config
def A ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
a = TimesformerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
a = TimesformerForVideoClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase )
# verify the logits shape
a = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowerCAmelCase )
def A ( self : str ) -> Dict:
"""simple docstring"""
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
_UpperCAmelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def A ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
a = TimesformerModelTester(self )
a = ConfigTester(
self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any]=False ) -> List[str]:
"""simple docstring"""
a = copy.deepcopy(__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def A ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def A ( self : int ) -> List[str]:
"""simple docstring"""
pass
def A ( self : Dict ) -> List[Any]:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def A ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCAmelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self : Any ) -> Optional[Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowerCAmelCase )
@slow
def A ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TimesformerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
for model_class in self.all_model_classes:
a = self.model_tester.seq_length
a = self.model_tester.num_frames
a = True
a = False
a = True
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a = True
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
a = len(__lowerCAmelCase )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCAmelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def A ( self : Optional[int] ) -> int:
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str ):
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
a = outputs.hidden_states
a = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
a = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
a = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def A ( self : List[str] ) -> str:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def A ( self : int ) -> int:
"""simple docstring"""
a = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__lowerCAmelCase )
a = self.default_image_processor
a = prepare_video()
a = image_processor(video[:8] , return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCAmelCase )
# verify the logits
a = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
a = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 32
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = ['''audio_values''', '''audio_mask''']
def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
a = spectrogram_length
a = num_channels
a = patch_size
a = feature_size // self.patch_size[1]
a = n_fft
a = sampling_rate // hop_length_to_sampling_rate
a = sampling_rate
a = padding_value
a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T
def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
"""simple docstring"""
a = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
a = log_spec[:, :-1]
a = log_spec - 2_0.0
a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
a = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __lowerCAmelCase ):
a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a = np.array(__lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a = padded_audio_features * self.padding_value
for i in range(len(__lowerCAmelCase ) ):
a = audio_features[i]
a = feature
# return as BatchFeature
if return_attention_mask:
a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
a = {"audio_values": padded_audio_features}
a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
return encoded_inputs
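# Data-flow sketch for __call__ above (descriptive comment only): each waveform
# becomes a log-mel spectrogram, is truncated to spectrogram_length frames, and
# all examples are padded with padding_value into audio_values of shape
# (batch, 1, max_time_len, feature_size); audio_mask marks, per example, which
# of the max_patch_len patches hold real audio (1) rather than padding (0).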
| 32
| 1
|
from __future__ import annotations
from math import gcd
def pollard_rho(num : int , seed : int = 2 , step : int = 1 , attempts : int = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
    # to calculate; it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value : int , step : int , modulus : int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is what Richard Brent's "optimized" variant actually does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
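# A quick self-check of pollard_rho on a small semiprime (an illustrative
# sketch; 8051 = 83 * 97 is an assumed test value, and the algorithm may
# legitimately return either factor, or None if every attempt fails):
if __name__ == "__main__":
    _demo_divisor = pollard_rho(8051)
    assert _demo_divisor in (83, 97, None)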
| 45
|
'''simple docstring'''
from manim import *
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def a__ ( self ) -> List[str]:
lowercase : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowercase : str = Rectangle(height=0.25 , width=0.25 )
lowercase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase : List[str] = [mem.copy() for i in range(6 )]
lowercase : Any = [mem.copy() for i in range(6 )]
lowercase : List[str] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowercase : Union[str, Any] = Text("CPU" , font_size=2_4 )
lowercase : List[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
lowercase : List[Any] = [mem.copy() for i in range(4 )]
lowercase : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Dict = Text("GPU" , font_size=2_4 )
lowercase : Tuple = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
lowercase : Tuple = [mem.copy() for i in range(6 )]
lowercase : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Any = Text("Model" , font_size=2_4 )
lowercase : str = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
lowercase : Dict = []
lowercase : Tuple = []
lowercase : List[Any] = []
for i, rect in enumerate(a_ ):
rect.set_stroke(a_ )
lowercase : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a_ , buff=0.0 )
self.add(a_ )
model_cpu_arr.append(a_ )
self.add(*a_ , *a_ , *a_ )
lowercase : Any = [mem.copy() for i in range(6 )]
lowercase : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[str] = Text("Loaded Checkpoint" , font_size=2_4 )
lowercase : Optional[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a_ )
lowercase : Any = []
lowercase : int = []
for i, rect in enumerate(a_ ):
lowercase : str = fill.copy().set_fill(a_ , opacity=0.7 )
target.move_to(a_ )
ckpt_arr.append(a_ )
lowercase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a_ )
self.add(*a_ , *a_ )
lowercase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase : str = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a_ , a_ )
lowercase : Any = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a_ )
lowercase : List[Any] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowercase : Any = [meta_mem.copy() for i in range(6 )]
lowercase : Dict = [meta_mem.copy() for i in range(6 )]
lowercase : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Any = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowercase : Optional[Any] = Text("Disk" , font_size=2_4 )
lowercase : List[str] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(a_ , run_time=3 ) , Write(a_ , run_time=1 ) , Create(a_ , run_time=1 ) )
lowercase : Optional[Any] = []
for i, rect in enumerate(a_ ):
lowercase : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a_ , run_time=1.5 ) )
self.play(*a_ )
self.play(FadeOut(a_ ) )
lowercase : List[Any] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) )
self.play(
FadeOut(a_ , a_ , *a_ , *a_ ) , )
self.wait()
| 372
| 0
|
def sum_of_digits(_lowerCAmelCase : int )-> int:
    n = abs(_lowerCAmelCase )
    res = 0
while n > 0:
res += n % 10
n //= 10
return res
def sum_of_digits_recursion(_lowerCAmelCase : int )-> int:
    n = abs(_lowerCAmelCase )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact(_lowerCAmelCase : int )-> int:
return sum(int(_lowerCAmelCase ) for c in str(abs(_lowerCAmelCase ) ) )
def benchmark()-> None:
from collections.abc import Callable
from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = F"{func.__name__}({value})"
        timing = timeit(F"__main__.{call}" , setup="""import __main__""" )
        print(F"{call:56} = {func(value )} -- {timing:.4f} seconds" )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
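# Worked example for the three digit-sum variants above: sum_of_digits(9045)
# peels off 5, 4, 0 and 9 from the right, so every variant returns 18.
if __name__ == "__main__":
    assert sum_of_digits(9045) == sum_of_digits_recursion(9045) == sum_of_digits_compact(9045) == 18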
| 701
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__lowerCAmelCase : List[Any] = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__lowerCAmelCase : str = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil(images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
        pil_images = [Image.fromarray(image ) for image in images]
return pil_images
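# Shape walk-through for numpy_to_pil above (illustrative): a float array of
# shape (H, W, 3) with values in [0, 1] gains a batch axis, is scaled to uint8,
# and yields one RGB Image; a (H, W, 1) array is squeezed and decoded as "L".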
| 164
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _lowerCAmelCase ( ) -> Optional[Any]:
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
# Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
parser.print_help()
exit(1 )
# Run
    args.func(args )
if __name__ == "__main__":
main()
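# Typical invocations routed through the subparsers registered above (the
# standard `accelerate` entry points, listed here for orientation only):
#     accelerate config          # interactive machine configuration
#     accelerate env             # print an environment report
#     accelerate launch train.py # launch a script with the saved config
#     accelerate test            # sanity-check the saved configuration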
| 689
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
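# Shape sketch for the dummy past_key_values generated above: each of the
# n_layer entries is a (key, value) pair of zero tensors shaped
# (batch, n_head, seqlen + 2, n_embd // n_head), and the attention mask is
# extended by seqlen + 2 ones along the sequence axis to cover the past.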
| 689
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : Dict = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
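# The _LazyModule indirection above keeps importing this package cheap: the
# torch-backed classes listed in _import_structure are only materialized on
# first attribute access, and are skipped entirely when torch is unavailable.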
| 716
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Any = logging.get_logger(__name__)
set_seed(7_70)
__snake_case : Union[str, Any] = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
__snake_case : int = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
__snake_case : Optional[int] = os.path.dirname(os.path.abspath(__file__))
__snake_case : Union[str, Any] = os.path.join(os.path.expanduser("""~"""), """.cache""")
__snake_case : Any = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _UpperCamelCase ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=False ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = model_type
if use_small:
key += "_small"
return os.path.join(UpperCamelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
hf_hub_download(repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , local_dir=UpperCamelCase_ )
def _UpperCamelCase ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any=False , UpperCamelCase_ : Union[str, Any]="text" ) -> Optional[Any]:
"""simple docstring"""
if model_type == "text":
lowerCAmelCase__ = BarkSemanticModel
lowerCAmelCase__ = BarkSemanticConfig
lowerCAmelCase__ = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCAmelCase__ = BarkCoarseModel
lowerCAmelCase__ = BarkCoarseConfig
lowerCAmelCase__ = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCAmelCase__ = BarkFineModel
lowerCAmelCase__ = BarkFineConfig
lowerCAmelCase__ = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCAmelCase__ = F"{model_type}_small" if use_small else model_type
lowerCAmelCase__ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(UpperCamelCase_ ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['repo_id'] , model_info['file_name'] )
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location=UpperCamelCase_ )
# this is a hack
lowerCAmelCase__ = checkpoint['model_args']
if "input_vocab_size" not in model_args:
lowerCAmelCase__ = model_args['vocab_size']
lowerCAmelCase__ = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCAmelCase__ = model_args.pop('n_head' )
lowerCAmelCase__ = model_args.pop('n_embd' )
lowerCAmelCase__ = model_args.pop('n_layer' )
lowerCAmelCase__ = ConfigClass(**checkpoint['model_args'] )
lowerCAmelCase__ = ModelClass(config=UpperCamelCase_ )
lowerCAmelCase__ = GenerationConfigClass()
lowerCAmelCase__ = model_generation_config
lowerCAmelCase__ = checkpoint['model']
# fixup checkpoint
lowerCAmelCase__ = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(UpperCamelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
lowerCAmelCase__ = k[len(UpperCamelCase_ ) :]
for old_layer_name in new_layer_name_dict:
lowerCAmelCase__ = new_k.replace(UpperCamelCase_ , new_layer_name_dict[old_layer_name] )
lowerCAmelCase__ = state_dict.pop(UpperCamelCase_ )
lowerCAmelCase__ = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCAmelCase__ = {k for k in extra_keys if not k.endswith('.attn.bias' )}
lowerCAmelCase__ = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCAmelCase__ = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(UpperCamelCase_ ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(UpperCamelCase_ ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
lowerCAmelCase__ = model.num_parameters(exclude_embeddings=UpperCamelCase_ )
lowerCAmelCase__ = checkpoint['best_val_loss'].item()
logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(UpperCamelCase_ , 3 )} loss" )
model.eval()
model.to(UpperCamelCase_ )
del checkpoint, state_dict
return model
def _UpperCamelCase ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any=False , UpperCamelCase_ : Optional[int]="text" ) -> Dict:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCAmelCase__ = 'cpu' # do conversion on cpu
lowerCAmelCase__ = _get_ckpt_path(UpperCamelCase_ , use_small=UpperCamelCase_ )
lowerCAmelCase__ = _load_model(UpperCamelCase_ , UpperCamelCase_ , model_type=UpperCamelCase_ , use_small=UpperCamelCase_ )
# load bark initial model
lowerCAmelCase__ = _bark_load_model(UpperCamelCase_ , 'cpu' , model_type=UpperCamelCase_ , use_small=UpperCamelCase_ )
if model_type == "text":
lowerCAmelCase__ = bark_model['model']
if model.num_parameters(exclude_embeddings=UpperCamelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
lowerCAmelCase__ = 5
lowerCAmelCase__ = 10
if model_type in ["text", "coarse"]:
lowerCAmelCase__ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowerCAmelCase__ = bark_model(UpperCamelCase_ )[0]
lowerCAmelCase__ = model(UpperCamelCase_ )
# take last logits
lowerCAmelCase__ = output_new_model_total.logits[:, [-1], :]
else:
lowerCAmelCase__ = 3
lowerCAmelCase__ = 8
lowerCAmelCase__ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCAmelCase__ = model(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = bark_model(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
def _UpperCamelCase ( UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = BarkSemanticConfig.from_pretrained(os.path.join(UpperCamelCase_ , 'config.json' ) )
lowerCAmelCase__ = BarkCoarseConfig.from_pretrained(os.path.join(UpperCamelCase_ , 'config.json' ) )
lowerCAmelCase__ = BarkFineConfig.from_pretrained(os.path.join(UpperCamelCase_ , 'config.json' ) )
lowerCAmelCase__ = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
lowerCAmelCase__ = BarkSemanticModel.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = BarkCoarseModel.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = BarkFineModel.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = EncodecModel.from_pretrained('facebook/encodec_24khz' )
lowerCAmelCase__ = BarkConfig.from_sub_model_configs(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCAmelCase__ = BarkModel(UpperCamelCase_ )
lowerCAmelCase__ = semantic
lowerCAmelCase__ = coarseAcoustic
lowerCAmelCase__ = fineAcoustic
lowerCAmelCase__ = codec
lowerCAmelCase__ = bark_generation_config
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
bark.save_pretrained(UpperCamelCase_ , repo_id=UpperCamelCase_ , push_to_hub=UpperCamelCase_ )
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
__snake_case : str = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
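# Example invocation (illustrative; the script filename is an assumption):
#     python convert_suno_to_hf.py text ./bark-text --is_small
# This downloads the matching checkpoint into CACHE_DIR if needed, converts
# it, compares logits against the original model, and saves the result.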
| 365
| 0
|
def catalan_numbers(upper_limit : int ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
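# Worked instance of the recurrence above (illustrative sanity check):
# C(3) = C(0)*C(2) + C(1)*C(1) + C(2)*C(0) = 2 + 1 + 2 = 5, and
# catalan_numbers(5) evaluates to [1, 1, 2, 5, 14, 42].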
| 544
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
A = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def lowerCamelCase ( UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
return torch.atana(UpperCamelCase , UpperCamelCase ) / math.pi * 2
def lowerCamelCase ( UpperCamelCase : str ) -> Union[str, Any]:
_lowerCamelCase = torch.sin(t * math.pi / 2 ) ** 2
_lowerCamelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(UpperCamelCase , UpperCamelCase )
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , snake_case__ : Any ) -> Optional[Any]:
super().__init__()
_lowerCamelCase = DiffusionAttnUnetaD(snake_case__ , n_attn_layers=4 )
_lowerCamelCase = deepcopy(self.diffusion )
_lowerCamelCase = torch.quasirandom.SobolEngine(1 , scramble=snake_case__ )
def lowerCamelCase ( UpperCamelCase : List[Any] ) -> List[str]:
_lowerCamelCase = MODELS_MAP[model_name]['url']
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
A = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
A = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
A = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
A = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
A = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
A = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def lowerCamelCase ( UpperCamelCase : Tuple ) -> int:
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def lowerCamelCase ( UpperCamelCase : Optional[Any] ) -> Tuple:
for key, value in ATTN_MAP.items():
if name.startswith(UpperCamelCase ) and not isinstance(UpperCamelCase , UpperCamelCase ):
return name.replace(UpperCamelCase , UpperCamelCase )
elif name.startswith(UpperCamelCase ):
return [name.replace(UpperCamelCase , UpperCamelCase ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def lowerCamelCase ( UpperCamelCase : Any , UpperCamelCase : int=13 ) -> Optional[int]:
_lowerCamelCase = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
_lowerCamelCase = 0
if string.startswith('net.3.' ):
depth += 1
_lowerCamelCase = string[6:]
elif string.startswith('net.' ):
_lowerCamelCase = string[4:]
while string.startswith('main.7.' ):
depth += 1
_lowerCamelCase = string[7:]
if string.startswith('main.' ):
_lowerCamelCase = string[5:]
# mid block
if string[:2].isdigit():
_lowerCamelCase = string[:2]
_lowerCamelCase = string[2:]
else:
_lowerCamelCase = string[0]
_lowerCamelCase = string[1:]
if depth == max_depth:
_lowerCamelCase = MID_NUM_TO_LAYER[layer_num]
_lowerCamelCase = 'mid_block'
elif depth > 0 and int(UpperCamelCase ) < 7:
_lowerCamelCase = DOWN_NUM_TO_LAYER[layer_num]
_lowerCamelCase = F"""down_blocks.{depth}"""
elif depth > 0 and int(UpperCamelCase ) > 7:
_lowerCamelCase = UP_NUM_TO_LAYER[layer_num]
_lowerCamelCase = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
_lowerCamelCase = DEPTH_0_TO_LAYER[layer_num]
_lowerCamelCase = F"""up_blocks.{max_depth - 1}""" if int(UpperCamelCase ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
_lowerCamelCase = string_left[1:]
if "resnets" in new_layer:
_lowerCamelCase = convert_resconv_naming(UpperCamelCase )
elif "attentions" in new_layer:
_lowerCamelCase = convert_attn_naming(UpperCamelCase )
_lowerCamelCase = new_string_left
if not isinstance(UpperCamelCase , UpperCamelCase ):
_lowerCamelCase = prefix + '.' + new_layer + '.' + string_left
else:
_lowerCamelCase = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def lowerCamelCase ( UpperCamelCase : List[Any] ) -> int:
_lowerCamelCase = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
_lowerCamelCase = rename(UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(UpperCamelCase , UpperCamelCase ):
_lowerCamelCase = transform_conv_attns(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
_lowerCamelCase = v
return new_state_dict
def lowerCamelCase ( UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Dict ) -> Optional[Any]:
if len(UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
_lowerCamelCase = v[:, :, 0]
else:
# bias
_lowerCamelCase = v
else:
# qkv matrices
_lowerCamelCase = v.shape[0]
_lowerCamelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_lowerCamelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_lowerCamelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def lowerCamelCase ( UpperCamelCase : Any ) -> Optional[Any]:
_lowerCamelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_lowerCamelCase = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
_lowerCamelCase = download(UpperCamelCase )
_lowerCamelCase = MODELS_MAP[model_name]['sample_rate']
_lowerCamelCase = MODELS_MAP[model_name]['sample_size']
_lowerCamelCase = Object()
_lowerCamelCase = sample_size
_lowerCamelCase = sample_rate
_lowerCamelCase = 0
_lowerCamelCase = UNetaDModel(sample_size=UpperCamelCase , sample_rate=UpperCamelCase )
_lowerCamelCase = diffusers_model.state_dict()
_lowerCamelCase = DiffusionUncond(UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=UpperCamelCase )['state_dict'] )
_lowerCamelCase = orig_model.diffusion_ema.eval()
_lowerCamelCase = orig_model.state_dict()
_lowerCamelCase = rename_orig_weights(UpperCamelCase )
_lowerCamelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_lowerCamelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(UpperCamelCase ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(UpperCamelCase ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
_lowerCamelCase = value.squeeze()
_lowerCamelCase = value
diffusers_model.load_state_dict(UpperCamelCase )
_lowerCamelCase = 1_00
_lowerCamelCase = 33
_lowerCamelCase = IPNDMScheduler(num_train_timesteps=UpperCamelCase )
_lowerCamelCase = torch.manual_seed(UpperCamelCase )
_lowerCamelCase = torch.randn([1, 2, config.sample_size] , generator=UpperCamelCase ).to(UpperCamelCase )
_lowerCamelCase = torch.linspace(1 , 0 , steps + 1 , device=UpperCamelCase )[:-1]
_lowerCamelCase = get_crash_schedule(UpperCamelCase )
_lowerCamelCase = DanceDiffusionPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
_lowerCamelCase = torch.manual_seed(33 )
_lowerCamelCase = pipe(num_inference_steps=UpperCamelCase , generator=UpperCamelCase ).audios
_lowerCamelCase = sampling.iplms_sample(UpperCamelCase , UpperCamelCase , UpperCamelCase , {} )
_lowerCamelCase = generated.clamp(-1 , 1 )
_lowerCamelCase = (generated - audio).abs().sum()
_lowerCamelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , UpperCamelCase )
print('Diff max' , UpperCamelCase )
assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
A = parser.parse_args()
main(args)
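# Example invocation (illustrative; the script filename is an assumption):
#     python convert_dance_diffusion.py --model_path gwf-440k --checkpoint_path ./dd-gwf
# When --model_path is not a local file it must equal one of MODELS_MAP's
# keys, in which case the original checkpoint is fetched with wget first.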
| 544
| 1
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __snake_case : UNetaDModel , __snake_case : UNetaDModel , __snake_case : DDPMScheduler , __snake_case : List[Any] , ) -> List[str]:
super().__init__()
UpperCAmelCase : Any = value_function
UpperCAmelCase : List[str] = unet
UpperCAmelCase : Dict = scheduler
UpperCAmelCase : Union[str, Any] = env
UpperCAmelCase : Dict = env.get_dataset()
UpperCAmelCase : Tuple = {}
for key in self.data.keys():
try:
UpperCAmelCase : Optional[int] = self.data[key].mean()
except: # noqa: E722
pass
UpperCAmelCase : Any = {}
for key in self.data.keys():
try:
UpperCAmelCase : str = self.data[key].std()
except: # noqa: E722
pass
UpperCAmelCase : int = env.observation_space.shape[0]
UpperCAmelCase : Optional[Any] = env.action_space.shape[0]
def A ( self : Any , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
return (x_in - self.means[key]) / self.stds[key]
def A ( self : Dict , __snake_case : Any , __snake_case : Dict ) -> Any:
return x_in * self.stds[key] + self.means[key]
def A ( self : Any , __snake_case : int ) -> List[Any]:
if type(__snake_case ) is dict:
return {k: self.to_torch(__snake_case ) for k, v in x_in.items()}
elif torch.is_tensor(__snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(__snake_case , device=self.unet.device )
def A ( self : int , __snake_case : Optional[int] , __snake_case : str , __snake_case : Union[str, Any] ) -> Dict:
for key, val in cond.items():
UpperCAmelCase : List[Any] = val.clone()
return x_in
def A ( self : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Any , __snake_case : Dict ) -> Any:
UpperCAmelCase : Any = x.shape[0]
UpperCAmelCase : Any = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCAmelCase : Tuple = torch.full((batch_size,) , __snake_case , device=self.unet.device , dtype=torch.long )
for _ in range(__snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCAmelCase : Any = self.value_function(x.permute(0 , 2 , 1 ) , __snake_case ).sample
UpperCAmelCase : Dict = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCAmelCase : Tuple = self.scheduler._get_variance(__snake_case )
UpperCAmelCase : Optional[int] = torch.exp(0.5 * posterior_variance )
UpperCAmelCase : str = model_std * grad
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[int] = x.detach()
UpperCAmelCase : List[str] = x + scale * grad
UpperCAmelCase : List[str] = self.reset_xa(__snake_case , __snake_case , self.action_dim )
UpperCAmelCase : str = self.unet(x.permute(0 , 2 , 1 ) , __snake_case ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCAmelCase : Optional[int] = self.scheduler.step(__snake_case , __snake_case , __snake_case , predict_epsilon=__snake_case )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
UpperCAmelCase : Any = self.reset_xa(__snake_case , __snake_case , self.action_dim )
UpperCAmelCase : List[str] = self.to_torch(__snake_case )
return x, y
def __call__( self : Any , __snake_case : List[str] , __snake_case : Optional[Any]=64 , __snake_case : Optional[int]=32 , __snake_case : Union[str, Any]=2 , __snake_case : Optional[int]=0.1 ) -> Tuple:
# normalize the observations and create batch dimension
UpperCAmelCase : Optional[Any] = self.normalize(__snake_case , '''observations''' )
UpperCAmelCase : Optional[int] = obs[None].repeat(__snake_case , axis=0 )
UpperCAmelCase : Tuple = {0: self.to_torch(__snake_case )}
UpperCAmelCase : Tuple = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCAmelCase : Union[str, Any] = randn_tensor(__snake_case , device=self.unet.device )
UpperCAmelCase : int = self.reset_xa(__snake_case , __snake_case , self.action_dim )
UpperCAmelCase : List[Any] = self.to_torch(__snake_case )
# run the diffusion process
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.run_diffusion(__snake_case , __snake_case , __snake_case , __snake_case )
# sort output trajectories by value
UpperCAmelCase : str = y.argsort(0 , descending=__snake_case ).squeeze()
UpperCAmelCase : Tuple = x[sorted_idx]
UpperCAmelCase : List[Any] = sorted_values[:, :, : self.action_dim]
UpperCAmelCase : str = actions.detach().cpu().numpy()
UpperCAmelCase : List[str] = self.de_normalize(__snake_case , key='''actions''' )
# select the action with the highest value
if y is not None:
UpperCAmelCase : List[Any] = 0
else:
# if we didn't run value guiding, select a random action
UpperCAmelCase : Optional[int] = np.random.randint(0 , __snake_case )
UpperCAmelCase : Optional[int] = denorm_actions[selected_index, 0]
return denorm_actions
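# Rough call shape for the planner above (keyword names are obfuscated in this
# dump, so the positional form is an assumption): pipe(obs, 64, 32, 2, 0.1)
# samples 64 candidate trajectories over a 32-step horizon, applies 2
# value-gradient guidance steps per denoising step at scale 0.1, and returns
# the de-normalized first action of the highest-value trajectory.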
| 528
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict : SplitDict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name( split_info ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 528
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__UpperCAmelCase = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
__UpperCAmelCase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Any = MBartTokenizer
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> List[str]:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase : str = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, src_lang=SCREAMING_SNAKE_CASE_, tgt_lang=SCREAMING_SNAKE_CASE_, additional_special_tokens=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Union[str, Any] = vocab_file
UpperCamelCase : List[Any] = False if not self.vocab_file else True
UpperCamelCase : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
UpperCamelCase : Tuple = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Any = src_lang if src_lang is not None else 'en_XX'
UpperCamelCase : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case_ ( self ) -> str:
return self._src_lang
@src_lang.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Union[str, Any] = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCamelCase : List[Any] = src_lang
UpperCamelCase : List[str] = self(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = tgt_lang_id
return inputs
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "en_XX", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "ro_RO", **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
UpperCamelCase : Tuple = src_lang
UpperCamelCase : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ ( self ) -> Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = []
UpperCamelCase : str = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : Optional[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = []
UpperCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
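# Typical translation setup for this tokenizer (a sketch; the upstream class
# behind this file is MBartTokenizerFast):
#     tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tok("UN Chief says there is no military solution", return_tensors="pt")
# Per set_src_lang_special_tokens above, source text is suffixed with </s>
# followed by the source language code.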
| 40
|
'''simple docstring'''
import numpy as np
class __a :
def __init__( self : Optional[int] ):
'''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
def __eq__( self : int ,lowerCamelCase : List[str] ):
'''simple docstring'''
return self.position == cell.position
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
print(self.position )
class __a :
def __init__( self : int ,lowerCamelCase : Optional[int]=(5, 5) ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.zeros(lowerCamelCase )
__SCREAMING_SNAKE_CASE = world_size[0]
__SCREAMING_SNAKE_CASE = world_size[1]
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
print(self.w )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__SCREAMING_SNAKE_CASE = cell.position[0]
__SCREAMING_SNAKE_CASE = cell.position[1]
__SCREAMING_SNAKE_CASE = []
for n in neughbour_cord:
__SCREAMING_SNAKE_CASE = current_x + n[0]
__SCREAMING_SNAKE_CASE = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__SCREAMING_SNAKE_CASE = Cell()
__SCREAMING_SNAKE_CASE = (x, y)
__SCREAMING_SNAKE_CASE = cell
neighbours.append(lowerCamelCase )
return neighbours
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
_open.append(__UpperCAmelCase )
while _open:
__SCREAMING_SNAKE_CASE = np.argmin([n.f for n in _open] )
__SCREAMING_SNAKE_CASE = _open[min_f]
_closed.append(_open.pop(__UpperCAmelCase ) )
if current == goal:
break
for n in world.get_neigbours(__UpperCAmelCase ):
for c in _closed:
if c == n:
continue
__SCREAMING_SNAKE_CASE = current.g + 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = n.position
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = goal.position
__SCREAMING_SNAKE_CASE = (ya - ya) ** 2 + (xa - xa) ** 2
__SCREAMING_SNAKE_CASE = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
while current.parent is not None:
path.append(current.position )
__SCREAMING_SNAKE_CASE = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
a = Gridworld()
# Start position and goal
a = Cell()
a = (0, 0)
a = Cell()
a = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
a = astar(world, start, goal)
# Just for visual reasons.
for i in s:
a = 1
print(world.w)
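
# A note on the heuristic (an aside, not from the original file): with unit
# step cost on an 8-connected grid, the squared Euclidean distance above can
# overestimate the true remaining cost, so A* is not guaranteed to return a
# shortest path. The Chebyshev distance is an admissible alternative:
def chebyshev_heuristic(cell, goal):
    xa, ya = cell.position
    xb, yb = goal.position
    return max(abs(xb - xa), abs(yb - ya))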
| 109
| 0
|
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
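
# A short usage sketch (an aside): `ConditionalDetrModel` is the matching
# model class in transformers; the `num_queries` override is illustrative.
#
# from transformers import ConditionalDetrConfig, ConditionalDetrModel
#
# config = ConditionalDetrConfig(num_queries=100)
# model = ConditionalDetrModel(config)
# # `attribute_map` aliases hidden_size -> d_model and
# # num_attention_heads -> encoder_attention_heads:
# print(config.hidden_size, config.num_attention_heads)  # 256 8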
| 716
|
"""
Project Euler Problem 205: https://projecteuler.net/problem=205

Peter rolls nine four-sided dice and Colin rolls six six-sided dice; the
higher total wins. Find the probability that Peter beats Colin, rounded to
seven decimal places.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how many ways each total can be rolled with the given dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Return the probability, rounded to 7 decimal places, that Peter wins."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
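
# A quick sanity check of the helper: with two six-sided dice the classic
# triangular distribution appears (6 ways to roll a 7, 1 way each for 2 and 12).
dist = total_frequency_distribution(sides_number=6, dice_number=2)
assert dist[7] == 6 and dist[2] == dist[12] == 1
assert sum(dist) == 6**2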
| 174
| 0
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
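
# An illustrative aside (not part of the metric above): BLEU multiplies the
# geometric mean of the n-gram precisions by a brevity penalty,
# BP = 1 if c > r else exp(1 - r / c), for candidate length c and reference
# length r, so that very short candidates cannot score highly.
import math


def brevity_penalty(candidate_len: int, reference_len: int) -> float:
    if candidate_len > reference_len:
        return 1.0
    return math.exp(1 - reference_len / candidate_len)


print(brevity_penalty(9, 10))  # ~0.8948, penalising the shorter candidate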
| 311
|
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
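
# A hypothetical invocation (paths are placeholders, not from this file):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin
#
# The dumped state dict can then be reloaded like so (sketch, same caveat):
def load_converted_checkpoint(config_path: str, weights_path: str) -> MobileBertForPreTraining:
    """Reload a checkpoint produced by the conversion script above."""
    config = MobileBertConfig.from_json_file(config_path)
    model = MobileBertForPreTraining(config)
    model.load_state_dict(torch.load(weights_path))
    return model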
| 537
| 0
|
import gc
import unittest

import numpy as np
import torch

from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
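
# A minimal inference sketch mirroring the slow test above (a CUDA device is
# assumed; the checkpoint name comes from the tests themselves):
#
# from diffusers import LDMTextToImagePipeline
#
# pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to("cuda")
# image = pipe(
#     "A painting of a squirrel eating a burger",
#     num_inference_steps=50,
#     guidance_scale=6.0,
# ).images[0]
# image.save("squirrel.png")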
| 711
|
import unittest

from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase : Dict = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
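
# A short usage sketch mirroring the special-tokens test above (the checkpoint
# name comes from the test itself):
#
# from transformers import BigBirdTokenizer
#
# big_bird = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
# input_ids = big_bird("Paris is the [MASK].").input_ids
# print(big_bird.decode(input_ids))  # "[CLS] Paris is the[MASK].[SEP]"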
| 425
| 0
|