"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
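
# Usage sketch (not from the original file): each `*_command_parser` above registers a
# sub-command and points its `func` default at the matching handler, so shell
# invocations such as
#   accelerate config
#   accelerate launch train.py
# resolve through `args.func(args)`. (`train.py` is an illustrative script name.)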
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
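
# Example invocation (sketch; both paths below are illustrative, not from the original file):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-original \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch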
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
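
# Usage sketch (not from the original file): inside `accelerate` these helpers are
# driven by `Accelerator.save_state`/`Accelerator.load_state`; the `accelerator`,
# `model`, `optimizer`, and "ckpt" names below are illustrative assumptions.
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")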
"""simple docstring"""
lowercase_ = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowercase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowercase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches `function` on `num_processes` workers from an interactive notebook
    environment (Colab/Kaggle TPU, multi-GPU, or single CPU/GPU/MPS).
    """
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """
    Launches `function` on `num_processes` CPU workers, for debugging distributed code
    on a single machine.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
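
# Usage sketch (not from the original file): the training function and its argument
# tuple are illustrative assumptions. The `Accelerator` must be created *inside* the
# launched function, as the errors above enforce.
#
#   def training_loop(mixed_precision="fp16"):
#       accelerator = Accelerator(mixed_precision=mixed_precision)
#       ...
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)
#
# `debug_launcher` runs the same fork-based pattern on CPU workers, which is useful
# for exercising distributed code paths on a single machine.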
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("123 hello world")
    '123 hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with detected corners marked, plus the list of corners found.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # Harris free parameter, validated in the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                # Sum the squared gradients over the window to build the structure tensor
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
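
# Note (added for clarity, not from the original script): `r` above is the classic
# Harris response R = det(M) - k * trace(M)^2 of the windowed structure tensor M.
# Large positive R indicates a corner, negative R an edge, and |R| near zero a flat
# region; both k (commonly 0.04-0.06) and the `r > 0.5` cut-off are heuristics that
# typically need tuning per image.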
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
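
# Note (added for clarity, not from the original file): `_LazyModule` replaces this
# module in `sys.modules`, so `from transformers.models.llama import LlamaModel`
# triggers the heavy `modeling_llama` import only on first attribute access, while
# importing the package itself stays cheap.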
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
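
# Usage sketch (not from the original file): with the usual transformers checkout
# layout, this module is typically run with pytest; slow integration tests are
# skipped unless RUN_SLOW is set. The path below is an assumption.
#   RUN_SLOW=1 pytest tests/models/deit/test_modeling_deit.py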
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
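
# Example invocation (sketch; the checkpoint path is illustrative, and the script also
# needs the `transformers_old` package referenced in the imports above):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-large-uncased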
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class FlaxLogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
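
# Note (sketch, not from the original file): during Flax generation a composed list
# like this is applied once per decoding step, roughly
#   scores = logits_processor(input_ids, scores, cur_len=cur_len)
# before sampling/argmax; the two tests above pin down that the fused list call
# matches applying each processor in sequence, eagerly and under `jax.jit`.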
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
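
# Note (added for clarity, not from the original file): the stub above only exists so
# that module-level references to `Image.open` still resolve when Pillow is missing;
# the vision-dependent tests are gated behind `@require_vision` and never call it.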
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
@require_torch
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
_UpperCamelCase : Optional[int] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
] , )
@require_torch
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = 0.9985
_UpperCamelCase : Optional[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = pipeline("object-detection" , model=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : Dict = 0.9993
_UpperCamelCase : Optional[int] = pipeline("object-detection" , model=lowerCAmelCase__ , threshold=lowerCAmelCase__ )
_UpperCamelCase : str = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
] , )
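# A minimal usage sketch (ours, not part of the original test file) of the
# pipeline exercised above; the checkpoint and threshold mirror the slow tests.
def _demo_object_detection():
    detector = pipeline("object-detection" , model="facebook/detr-resnet-50" )
    predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.9985 )
    for prediction in predictions:
        print(prediction["label"] , prediction["score"] , prediction["box"] )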
| 239 | 1 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=input_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to input_ids and the attention mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    @slow
    def test_generation( self ):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors="pt" ).input_ids
            generated_ids = model.generate(input_ids , max_length=50 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 83 |
'''simple docstring'''
def _a ( arr : list ):
    """Return the minimum difference between the sums of two subsets of arr."""
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when a subset of the first i elements sums to j
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
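# A minimal usage sketch (ours) for the subset-sum difference above; the sample
# list and the expected value are illustrative only.
def _demo_min_subset_sum_diff() -> None:
    sample = [1, 6, 11, 5]
    # best split is {1, 5, 6} vs {11}, so the minimum difference is 1
    print(_a(sample ) )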
| 347 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')
def get_hash(example ):
    """Get the hash of a code example."""
    return {"hash": hashlib.md5(re.sub(PATTERN , "" , example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example ):
    """Compute per-line length statistics of a code example."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example ):
    """Compute the fraction of alphanumeric characters in a code example."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    """Check whether the example's hash is still in uniques, consuming it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example , scan_width=5 ):
    """Check whether the first lines mark the file as auto-generated."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    """Check whether the example looks like a configuration or a test file."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: a keyword appears in the opening lines
    for _, line in zip(range(scan_width) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: a high fraction of lines mention config or test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ):
    """Check whether the example contains none of the usual Python constructs."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    """Check whether the example uses the assignment operator fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example ):
    """Compute the character-to-token ratio of the example with the global tokenizer."""
    input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
    ratio = len(example["content"]) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ):
    """Chain all preprocessing steps into one dict of per-example statistics."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
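# A small illustration (ours) of the statistics helpers above; only the
# tokenizer-free ones are exercised so it runs without the global tokenizer.
def _demo_stats() -> None:
    example = {"content": "def add(a, b):\n    return a + b\n"}
    print(get_hash(example ) , line_stats(example ) , alpha_stats(example ) )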
def filter(example , uniques , args ):
    """Filter dataset with uniqueness and heuristic criteria."""
    if not check_uniques(example , uniques ):
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path ):
    """Compress a file with g-zip and remove the original."""
    with open(file_path , "rb" ) as f_in:
        with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as an artifact
# (not sure this is the right place to save it)
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 606 | '''simple docstring'''
def hamming( n_element : int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('''a should be a positive number''')
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5))
index += 1
return hamming_list
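# A quick, self-contained check (ours) against the well-known opening of the
# series: the first ten 5-smooth numbers.
def _hamming_sanity_check() -> None:
    assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]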
if __name__ == "__main__":
    n = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
    print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
    hamming_numbers = hamming(int(n))
print('''-----------------------------------------------------''')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('''-----------------------------------------------------''')
| 606 | 1 |
import math
import os
import sys
def read_file_binary(file_path :str ) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def add_key_to_lexicon(lexicon :dict[str, str] , curr_string :str , index :int , last_match_id :str ) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string )
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index )[2:]
def compress_data(data_bits :str ) -> str:
    """Compress the given string of bits using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path :str , compressed :str ) -> str:
    """Prepend the original file length, in a unary-prefixed binary form, to the data."""
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path :str , to_write :str ) -> None:
    """Write the given string of bits to a file, padding the final byte."""
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def compress(source_path :str , destination_path :str ) -> None:
    """Read the source file, compress it and write the result to the destination."""
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
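# A tiny illustration (ours) of the bit-level compressor above, independent of
# any file I/O: feed a short bit string straight into compress_data.
def _demo_compress_bits() -> None:
    sample_bits = "0100100111"
    print(compress_data(sample_bits ) )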
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 268 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args ):
    """Factory used by the argument parser to build a DownloadCommand from parsed args."""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        """Register the `download` subcommand and its arguments on the given parser."""
        download_parser = parser.add_parser("download" )
        download_parser.add_argument(
            "--cache-dir" , type=str , default=None , help="Path to location to store the models" )
        download_parser.add_argument(
            "--force" , action="store_true" , help="Force the model to be downloaded even if it is already in cache-dir" )
        download_parser.add_argument(
            "--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
        download_parser.add_argument("model" , type=str , help="Name of the model to download" )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
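# A hedged wiring sketch (ours): build a parser, register the subcommand and
# construct the command from sample argv, without triggering a real download.
def _demo_parse_download_args() -> None:
    parser = ArgumentParser("transformers-cli download demo" )
    subparsers = parser.add_subparsers()
    DownloadCommand.register_subcommand(subparsers )
    args = parser.parse_args(["download", "--cache-dir", "/tmp/models", "bert-base-uncased"] )
    command = args.func(args )
    print(type(command ).__name__ , command._model )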
| 268 | 1 |
'''simple docstring'''
def lucas_lehmer_test( p : int ) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (p itself should be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
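# Illustrative values (standard number-theory facts; the loop itself is ours):
# 2**p - 1 is prime for p in (3, 5, 7, 13) and composite for p in (11, 23).
def _demo_known_exponents() -> None:
    for p in (3, 5, 7, 11, 13, 23):
        print(p , lucas_lehmer_test(p ) )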
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11)) | 493 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_0( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
    @property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
def lowercase ( self , UpperCamelCase , UpperCamelCase=0 ):
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(UpperCamelCase ).startswith("mps" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_SCREAMING_SNAKE_CASE = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = "cpu"
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image ) | 493 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str ):
    """Decorator that tags a function as the handler for the given key."""
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func
    return decorator
def mark_multiple(*keys: List[str] ):
    """Decorator that tags a function as the handler for several keys at once."""
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func
    return decorator
class KeyHandler(type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """Find and call the handler registered for the pressed key, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    """Build a new class whose decorated methods are wired up as key handlers."""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 |
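# A hedged usage sketch of the key-dispatch machinery above; the Menu class and
# the "up" key choice are ours and assume KEYMAP defines that key.
def _demo_key_dispatch() -> None:
    class Menu:
        @mark(KEYMAP["up"] )
        def move_up(cls ):
            return "moved up"
    menu_cls = register(Menu )
    # handle_input would read a key via get_character(); here we just show the table
    print(menu_cls.key_handler )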
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ):  # picklable for multiprocessing
    return x.sum()
def add_one(i ):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest( TestCase ):
    def test_map_nested(self ):
'''simple docstring'''
__lowercase = {}
__lowercase = []
__lowercase = 1
__lowercase = [1, 2]
__lowercase = {"""a""": 1, """b""": 2}
__lowercase = {"""a""": [1, 2], """b""": [3, 4]}
__lowercase = {"""a""": {"""1""": 1}, """b""": 2}
__lowercase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__lowercase = {}
__lowercase = []
__lowercase = 2
__lowercase = [2, 3]
__lowercase = {"""a""": 2, """b""": 3}
__lowercase = {"""a""": [2, 3], """b""": [4, 5]}
__lowercase = {"""a""": {"""1""": 2}, """b""": 3}
__lowercase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
__lowercase = 2
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
__lowercase = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
__lowercase = {"""a""": 2, """b""": 0, """c""": 2}
__lowercase = {
"""a""": np.eye(2 ).astype(A_ ),
"""b""": np.zeros(3 ).astype(A_ ),
"""c""": np.ones(2 ).astype(A_ ),
}
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(A_ ): # can't pickle a local lambda
map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
    def test_zip_dict(self ):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
    def test_temporary_assignment(self ):
        class Foo:
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(foo , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length , num_proc , expected_num_proc ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest( TestCase ):
@require_tf
    def test_temp_seed_tensorflow(self ):
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
    def test_temp_seed_pytorch(self ):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_temp_seed_numpy(self ):
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
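# A compact illustration (ours) of the context manager exercised above:
# identical seeds reproduce NumPy draws, then global randomness is restored.
def _demo_temp_seed() -> None:
    with temp_seed(123 ):
        first = np.random.rand(3 )
    with temp_seed(123 ):
        second = np.random.rand(3 )
    np.testing.assert_equal(first , second )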
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data(input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1 , y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input ) == expected_output
    input = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="foo" )] )
def _split_text(text: str ):
    return text.split()
def _2seconds_generator_of_2items_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _2seconds_generator_of_2items_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 616 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def _snake_case ( inductance : float , frequency : float , reactance : float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
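# Worked example (our numbers): with inductance = 35e-3 H and frequency = 1e3 Hz,
# the inductive reactance is 2 * pi * 1e3 * 35e-3 = 70 * pi ~= 219.9 ohms.
def _demo_ind_reactance() -> None:
    print(_snake_case(35e-3 , 1e3 , 0 ) )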
if __name__ == "__main__":
import doctest
doctest.testmod()
| 500 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__)
class VideoMAEFeatureExtractor( VideoMAEImageProcessor ):
    r"""Deprecated alias of :class:`VideoMAEImageProcessor`."""
    def __init__(self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 500 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute( self , input_texts , model_id , batch_size: int = 16 , add_start_token: bool = True , device=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="pt" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 138 |
'''simple docstring'''
def reverse_long_words( sentence : str )-> str:
    """Reverse all words that are longer than 4 characters in a sentence.
    >>> reverse_long_words('''Hey wollef sroirraw''')
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 138 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation using the stochastic sampler of
    Karras et al. (2022).
    """

    # add type hints for linting
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
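# Hedged usage sketch (not part of the original file). The checkpoint id is an
# example of a public unconditional UNet usable with the Karras-VE sampler:
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")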
| 716 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 586 | 0 |
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    """
    Return the sum of all positive integers which cannot be written as the sum
    of two abundant numbers (Project Euler problem 23). All integers greater
    than 28123 can be written as such a sum, so the search is bounded by it.
    """
    # sum_divs[n] will hold the sum of proper divisors of n (1 counts for all n)
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
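# Sanity check: 12 is the smallest abundant number (1 + 2 + 3 + 4 + 6 = 16 > 12),
# so 24 = 12 + 12 is the smallest integer expressible as the sum of two abundant
# numbers; 28123 is the known analytic upper bound used by Project Euler 23.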
| 370 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
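# Hedged note (not part of the original test file): the @slow tests above are
# skipped unless the transformers test suite is run with RUN_SLOW set, e.g.
# (the path assumes the usual repository layout):
#
#   RUN_SLOW=1 pytest tests/models/vit_mae/test_modeling_vit_mae.py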
| 719 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation using the stochastic sampler of
    Karras et al. (2022).
    """

    # add type hints for linting
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 417 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
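# Hedged usage note (not part of the original test file): the `from_config`
# round-trip exercised by `test_switch` is the same mechanism used to swap
# schedulers inside a pipeline; the pipeline id below is illustrative:
#
#   from diffusers import DiffusionPipeline, DEISMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)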
| 320 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
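# Hedged usage sketch (not part of the original file):
#
#   from transformers import CamembertConfig, CamembertModel
#   config = CamembertConfig()        # camembert-base sized defaults
#   model = CamembertModel(config)    # randomly initialized weights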
| 320 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"transformer.blocks.{i}.norm1.weight", F"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm1.bias", F"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.weight", F"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.bias", F"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.norm2.weight", F"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm2.bias", F"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.mlp.fc1.weight", F"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc1.bias", F"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.weight", F"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.bias", F"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ViLT structure.
    """

    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase__ : str = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
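# Hedged usage sketch (not part of the original script; the script filename and
# dump folder are illustrative, the URL is the default above):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm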
| 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
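# Hedged usage sketch (not part of the original file); the checkpoint id is one
# of the public Chinese-CLIP checkpoints and is used here only for illustration:
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")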
| 0 | 1 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n import os\n return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
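# Note: every case above imports `os` at module scope and hides `bar`/`baz`
# behind try/except blocks, so `get_imports` is expected to report only
# ["os"] -- imports guarded by try/except are treated as optional dependencies.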
| 698 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """
    Count the number of reversible numbers of the given length, filling in the
    digits from the outside in and tracking the parity of the carry.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    To evaluate the solution, use solution().

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
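# Sanity check (Project Euler 145): 36 is reversible because 36 + 63 = 99 has
# only odd digits, and there are exactly 120 reversible numbers below one
# thousand, which is what solution(3) returns.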
| 565 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used, so OCR should not be applied
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
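# Hedged usage note (not part of the original file): with this OnnxConfig, the
# legacy `transformers.onnx` exporter can produce an ONNX graph, roughly:
#
#   python -m transformers.onnx --model=microsoft/layoutlmv3-base onnx_output/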
| 720 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = {}
import re
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
snake_case_ = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
snake_case_ = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
snake_case_ = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
else:
snake_case_ = original_key
snake_case_ = replace_key(_SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle mismatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
snake_case_ = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
snake_case_ = original_key
snake_case_ = original_key
snake_case_ = value
return new_dict
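# Worked illustration of the renaming above (hand-traced, not taken from a real
# checkpoint): the original key "encoders.0.level_blocks.1.model.2.3.weight" matches
# re_encoder_block_conv_in with groups ("0", "1", "2", "3", "weight"), giving
# block_index = 2 * 2 + 3 = 7 and the new key
# "encoders.0.level_blocks.1.downsample_block.7.weight".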
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
snake_case_ = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , """wb""" ).write(r.content )
snake_case_ = MODEL_MAPPING[model_name.split("""/""" )[-1]]
snake_case_ = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = JukeboxModel(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
snake_case_ = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
snake_case_ = old_dic[k]
elif k.endswith(""".w""" ):
snake_case_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
snake_case_ = old_dic[k]
else:
snake_case_ = old_dic[k]
snake_case_ = """vqvae""" if i == 0 else f"""priors.{3 - i}"""
snake_case_ = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
snake_case_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , """w""" ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
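# Example invocation (the script filename is illustrative; both flags fall back to the
# defaults declared above):
# python convert_jukebox_checkpoint.py --model_name jukebox-5b-lyrics \
# --pytorch_dump_folder_path jukebox-5b-lyrics-converted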
| 2 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ) -> Any:
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def UpperCamelCase__ ( self ) -> List[str]:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Optional[int]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase , )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
__a = OpenLlamaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__a = model(UpperCamelCase , attention_mask=UpperCamelCase )
__a = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> List[str]:
__a = True
__a = OpenLlamaModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__a = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )
__a = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , )
__a = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Dict:
__a = OpenLlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__a = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Any:
__a = True
__a = True
__a = OpenLlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# first forward pass
__a = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , )
__a = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
__a = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
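# The method above follows the standard KV-cache consistency check: run the decoder
# once over the full concatenated sequence and once incrementally with past_key_values,
# then compare a randomly chosen slice of the hidden states for the appended tokens;
# a correct cache implementation must agree to within the 1e-3 tolerance used here.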
def UpperCamelCase__ ( self ) -> Optional[int]:
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
_a = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_a = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_a = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def UpperCamelCase__ ( self ) -> Dict:
__a = OpenLlamaModelTester(self )
__a = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Tuple:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase__ ( self ) -> Any:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict['input_ids']
__a = input_ids.ne(1 ).to(UpperCamelCase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__a = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) -> Any:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = 'single_label_classification'
__a = input_dict['input_ids']
__a = input_ids.ne(1 ).to(UpperCamelCase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__a = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = 'multi_label_classification'
__a = input_dict['input_ids']
__a = input_ids.ne(1 ).to(UpperCamelCase )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = OpenLlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__a = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase__ ( self , UpperCamelCase ) -> Tuple:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = ids_tensor([1, 10] , config.vocab_size )
__a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = OpenLlamaModel(UpperCamelCase )
original_model.to(UpperCamelCase )
original_model.eval()
__a = original_model(UpperCamelCase ).last_hidden_state
__a = original_model(UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = {'type': scaling_type, 'factor': 10.0}
__a = OpenLlamaModel(UpperCamelCase )
scaled_model.to(UpperCamelCase )
scaled_model.eval()
__a = scaled_model(UpperCamelCase ).last_hidden_state
__a = scaled_model(UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
| 539 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase ( __magic_name__ ):
_a = """segformer"""
def __init__( self , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=[2, 2, 2, 2] , UpperCamelCase=[8, 4, 2, 1] , UpperCamelCase=[32, 64, 160, 256] , UpperCamelCase=[7, 3, 3, 3] , UpperCamelCase=[4, 2, 2, 2] , UpperCamelCase=[1, 2, 5, 8] , UpperCamelCase=[4, 4, 4, 4] , UpperCamelCase="gelu" , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase=0.02 , UpperCamelCase=0.1 , UpperCamelCase=1e-6 , UpperCamelCase=256 , UpperCamelCase=255 , **UpperCamelCase , ) -> int:
super().__init__(**UpperCamelCase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCamelCase , )
__a = num_channels
__a = num_encoder_blocks
__a = depths
__a = sr_ratios
__a = hidden_sizes
__a = patch_sizes
__a = strides
__a = mlp_ratios
__a = num_attention_heads
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = classifier_dropout_prob
__a = initializer_range
__a = drop_path_rate
__a = layer_norm_eps
__a = decoder_hidden_size
__a = kwargs.get('reshape_last_stage' , UpperCamelCase )
__a = semantic_loss_ignore_index
class __lowercase ( __magic_name__ ):
_a = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1e-4
@property
def UpperCamelCase__ ( self ) -> int:
return 12
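# A minimal export sketch using the OnnxConfig subclass above (the helper name and its
# exact signature are from memory and should be verified against the transformers.onnx
# reference before use):
# from transformers.onnx import export
# onnx_inputs, onnx_outputs = export(preprocessor, model, onnx_config, opset, output_path)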
| 539 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = (EulerDiscreteScheduler,)
lowerCamelCase_ = 1_0
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
A_ : Optional[int] = {
'num_train_timesteps': 1_1_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowercase )
return config
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.scheduler_classes[0]
A_ : List[Any] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
A_ : List[Any] = torch.manual_seed(0 )
A_ : Tuple = self.dummy_model()
A_ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ : List[str] = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
A_ : Union[str, Any] = scheduler.scale_model_input(lowercase , lowercase )
A_ : Union[str, Any] = model(lowercase , lowercase )
A_ : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : Optional[int] = output.prev_sample
A_ : int = torch.sum(torch.abs(lowercase ) )
A_ : Optional[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
A_ : List[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
A_ : Dict = torch.manual_seed(0 )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ : int = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
A_ : Any = scheduler.scale_model_input(lowercase , lowercase )
A_ : Any = model(lowercase , lowercase )
A_ : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : Optional[Any] = output.prev_sample
A_ : Tuple = torch.sum(torch.abs(lowercase ) )
A_ : Union[str, Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.26_76E-06 ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
A_ : Dict = torch.manual_seed(0 )
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ : int = sample.to(lowercase )
for t in scheduler.timesteps:
A_ : Dict = scheduler.scale_model_input(lowercase , lowercase )
A_ : int = model(lowercase , lowercase )
A_ : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : Optional[int] = output.prev_sample
A_ : Optional[int] = torch.sum(torch.abs(lowercase ) )
A_ : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**lowercase , use_karras_sigmas=lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : str = self.dummy_model()
A_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ : str = sample.to(lowercase )
for t in scheduler.timesteps:
A_ : Tuple = scheduler.scale_model_input(lowercase , lowercase )
A_ : str = model(lowercase , lowercase )
A_ : Optional[int] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : Union[str, Any] = output.prev_sample
A_ : Dict = torch.sum(torch.abs(lowercase ) )
A_ : Tuple = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
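# Standalone sketch of the sampling loop the tests above exercise (the denoiser call is
# a placeholder; shapes are illustrative):
# scheduler = EulerDiscreteScheduler(num_train_timesteps=1100)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
# model_input = scheduler.scale_model_input(sample, t)
# noise_pred = denoiser(model_input, t)  # placeholder model
# sample = scheduler.step(noise_pred, t, sample).prev_sample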
| 70 | import numpy as np
_UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : Any = np.array(_UpperCAmelCase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE )
A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : int = message.replace('j' , 'i' )
A_ : Any = np.empty((2, len(lowercase )) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
A_ : Union[str, Any] = numbers[0]
A_ : Union[str, Any] = numbers[1]
A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) )
A_ : int = ''
for numbers_index in range(len(lowercase ) ):
A_ : str = int(second_step[numbers_index * 2] )
A_ : str = int(second_step[(numbers_index * 2) + 1] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : Tuple = np.empty(2 * len(lowercase ) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
A_ : Optional[int] = numbers[0]
A_ : Dict = numbers[1]
A_ : Optional[int] = first_step.reshape((2, len(lowercase )) )
A_ : List[str] = ''
for numbers_index in range(len(lowercase ) ):
A_ : List[Any] = int(second_step[0, numbers_index] )
A_ : Optional[int] = int(second_step[1, numbers_index] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : str = decoded_message + letter
return decoded_message
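# Round-trip sketch (class name follows the obfuscated definition above; note that
# encode() folds "j" into "i", so only j-free lowercase messages round-trip exactly):
# cipher = UpperCAmelCase()
# assert cipher.decode(cipher.encode("testmessage")) == "testmessage"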
| 70 | 1 |
'''simple docstring'''
import argparse
import json
import subprocess
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Union[str, Any]:
__lowerCamelCase : Tuple = []
__lowerCamelCase : Dict = (
F'curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"'
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
__lowerCamelCase : List[str] = subprocess.run(UpperCamelCase__ ,shell=UpperCamelCase__ ,stdout=subprocess.PIPE )
__lowerCamelCase : Optional[Any] = output.stdout.decode('utf-8' )
__lowerCamelCase : Dict = json.loads(UpperCamelCase__ )
__lowerCamelCase : Optional[Any] = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(UpperCamelCase__ )
# save the result so we can report them on Slack
with open('offline_runners.txt' ,'w' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) > 0:
__lowerCamelCase : List[str] = """\n""".join([x['name'] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def a_ ( _lowerCAmelCase ) -> Optional[Any]:
return values.split(',' )
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_UpperCamelCase = parser.parse_args()
get_runner_status(args.target_runners, args.token)
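# Example invocation (runner names and token are placeholders; the token needs the
# actions:read scope per the help text above):
# python check_runners.py --target_runners runner-a,runner-b --token $GITHUB_TOKEN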
| 459 |
"""simple docstring"""
from PIL import Image
def lowerCAmelCase__ ( img , level ):
'''simple docstring'''
def brightness(c ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
brigt_img = lowerCAmelCase__(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
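# For 8-bit modes, Image.point builds a lookup table, so the brightness function above
# is evaluated once per possible pixel value (0-255) rather than once per pixel.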
| 389 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __magic_name__ :
"""simple docstring"""
pass
| 716 |
import math
import qiskit
def _lowerCamelCase ( _a = 1 , _a = 1 , _a = 1 ):
"""simple docstring"""
if (
isinstance(_a , str )
or isinstance(_a , str )
or isinstance(_a , str )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(_a ) != input_a)
or (math.floor(_a ) != input_a)
or (math.floor(_a ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
_lowerCamelCase = qiskit.QuantumRegister(4 , '''qr''' )
_lowerCamelCase = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
_lowerCamelCase = [input_a, input_a, carry_in]
_lowerCamelCase = qiskit.QuantumCircuit(_a , _a )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_a ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_a ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_a ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _a ) # measure the last two qbits
_lowerCamelCase = qiskit.Aer.get_backend('''aer_simulator''' )
_lowerCamelCase = qiskit.execute(_a , _a , shots=1_0_0_0 )
return job.result().get_counts(_a )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
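# For definite inputs (0 or 1) every shot should collapse to a single two-bit outcome
# encoding the classical full-adder result; an input of 2 places a Hadamard on that
# qubit (see the branch above), spreading the counts over a superposition of outcomes.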
| 297 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowerCAmelCase__ = data_utils.TransfoXLTokenizer
lowerCAmelCase__ = data_utils.TransfoXLCorpus
lowerCAmelCase__ = data_utils
lowerCAmelCase__ = data_utils
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(lowerCamelCase_ , 'rb') as fp:
UpperCamelCase__ : Union[str, Any] = pickle.load(lowerCamelCase_ , encoding='latin1')
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase__ : List[str] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(f'Save vocabulary to {pytorch_vocab_dump_path}')
UpperCamelCase__ : Any = corpus.vocab.__dict__
torch.save(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Any = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , lowerCamelCase_)
UpperCamelCase__ : List[Any] = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(f'Save dataset to {pytorch_dataset_dump_path}')
torch.save(lowerCamelCase_ , lowerCamelCase_)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase__ : str = os.path.abspath(lowerCamelCase_)
UpperCamelCase__ : int = os.path.abspath(lowerCamelCase_)
print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.')
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase__ : Tuple = TransfoXLConfig()
else:
UpperCamelCase__ : Optional[int] = TransfoXLConfig.from_json_file(lowerCamelCase_)
print(f'Building PyTorch model from configuration: {config}')
UpperCamelCase__ : Union[str, Any] = TransfoXLLMHeadModel(lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = load_tf_weights_in_transfo_xl(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# Save pytorch-model
UpperCamelCase__ : List[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : str = os.path.join(lowerCamelCase_ , lowerCamelCase_)
print(f'Save PyTorch model to {os.path.abspath(lowerCamelCase_)}')
torch.save(model.state_dict() , lowerCamelCase_)
print(f'Save configuration file to {os.path.abspath(lowerCamelCase_)}')
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
lowerCAmelCase__ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
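# Typical invocations (paths are illustrative): pass --tf_checkpoint_path plus
# --transfo_xl_config_file to convert a TensorFlow checkpoint, or
# --transfo_xl_dataset_file to convert a pickled corpus; either way the result lands in
# --pytorch_dump_folder_path.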
| 596 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-1'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-2'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-3'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-4'
class __lowercase (__lowerCamelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , UpperCAmelCase_ : bool = True , ):
super().__init__()
UpperCamelCase__ : int = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = StableDiffusionPipeline(
vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , requires_safety_checker=UpperCAmelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea)
@property
def __UpperCamelCase ( self : Optional[Any]):
return {k: getattr(self , UpperCAmelCase_) for k in self.config.keys() if not k.startswith('_')}
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[Union[str, int]] = "auto"):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_)
def __UpperCamelCase ( self : Any):
self.enable_attention_slicing(UpperCAmelCase_)
@torch.no_grad()
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Optional[int] , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Tuple , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : str , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Dict , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Tuple , ):
UpperCamelCase__ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(UpperCAmelCase_)
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.')
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase__ : Dict = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase__ : Optional[Any] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase__ : Optional[Any] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase__ : List[str] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
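# Hypothetical usage (the custom_pipeline identifier is an assumption and would need to
# match wherever this comparison pipeline is published):
# pipe = DiffusionPipeline.from_pretrained(
# "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# output = pipe(prompt="an astronaut riding a horse")
# images = output.images  # four images, one per v1.1-v1.4 checkpoint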
| 596 | 1 |
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> float:
'''simple docstring'''
snake_case_ = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("""All input parameters must be positive""" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("""Relative densities cannot be greater than one""" )
else:
snake_case_ = 1 - (matter_density + radiation_density + dark_energy)
snake_case_ = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
snake_case_ = hubble_constant * e_a ** (1 / 2)
return hubble
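# The expression above is the Friedmann equation,
# H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_L),
# with the curvature density Omega_k = 1 - (Omega_r + Omega_m + Omega_L) computed from
# the closure relation.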
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCamelCase_ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 713 |
from __future__ import annotations
def UpperCamelCase( lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> tuple[int, float, str]:
'''simple docstring'''
snake_case_ = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
snake_case_ = {
"""a""": 0.0_84_97,
"""b""": 0.0_14_92,
"""c""": 0.0_22_02,
"""d""": 0.0_42_53,
"""e""": 0.1_11_62,
"""f""": 0.0_22_28,
"""g""": 0.0_20_15,
"""h""": 0.0_60_94,
"""i""": 0.0_75_46,
"""j""": 0.0_01_53,
"""k""": 0.0_12_92,
"""l""": 0.0_40_25,
"""m""": 0.0_24_06,
"""n""": 0.0_67_49,
"""o""": 0.0_75_07,
"""p""": 0.0_19_29,
"""q""": 0.0_00_95,
"""r""": 0.0_75_87,
"""s""": 0.0_63_27,
"""t""": 0.0_93_56,
"""u""": 0.0_27_58,
"""v""": 0.0_09_78,
"""w""": 0.0_25_60,
"""x""": 0.0_01_50,
"""y""": 0.0_19_94,
"""z""": 0.0_00_77,
}
else:
# Custom frequencies dictionary
snake_case_ = frequencies_dict
if not case_sensitive:
snake_case_ = ciphertext.lower()
# Chi squared statistic values
snake_case_ = {}
# cycle through all of the shifts
for shift in range(len(lowercase_ ) ):
snake_case_ = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
snake_case_ = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase_ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
snake_case_ = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
snake_case_ = letter.lower()
if letter in frequencies:
# Get the number of times the letter occurs in the message
snake_case_ = decrypted_with_shift.lower().count(letter )
# Get the expected number of times the letter should appear based
# on letter frequencies
snake_case_ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
snake_case_ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the number of times the letter occurs in the message
snake_case_ = decrypted_with_shift.count(letter )
# Get the expected number of times the letter should appear based
# on letter frequencies
snake_case_ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
snake_case_ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
snake_case_ = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase_ ) -> tuple[float, str]:
return chi_squared_statistic_values[lowercase_ ]
snake_case_ = min(
lowercase_ , key=lowercase_ , )
# Get all the data from the most likely cipher (key, decoded message)
snake_case_ , snake_case_ = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
) | 161 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : int=3 , __lowerCAmelCase : int=18 , __lowerCAmelCase : Tuple=30 , __lowerCAmelCase : List[str]=4_00 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Tuple=False , ) -> List[str]:
_A = size if size is not None else {'''height''': 20, '''width''': 20}
_A = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_center_crop
_A = crop_size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_reduce_labels
def snake_case_ ( self : Optional[int] ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
_A = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_A = Image.open(dataset[0]['''file'''] )
_A = Image.open(dataset[1]['''file'''] )
return image, map
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_A = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_A = Image.open(ds[0]['''file'''] )
_A = Image.open(ds[1]['''file'''] )
_A = Image.open(ds[2]['''file'''] )
_A = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
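# The two fixture helpers above pull real ADE20K images and their segmentation maps
# from the hub; the single variant returns one (image, map) pair and the batch variant
# returns parallel lists, matching what the image processor expects for semantic
# segmentation preprocessing.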
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : List[Any] = BeitImageProcessor if is_vision_available() else None
def snake_case_ ( self : Optional[Any] ) -> Optional[Any]:
_A = BeitImageProcessingTester(self )
@property
def snake_case_ ( self : Dict ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : int ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
def snake_case_ ( self : int ) -> List[str]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] ) -> Optional[Any]:
pass
def snake_case_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case_ ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case_ ( self : int ) -> str:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
_A = []
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_A = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
_A = image_processing(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
_A , _A = prepare_semantic_single_inputs()
_A = image_processing(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
_A , _A = prepare_semantic_batch_inputs()
_A = image_processing(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def snake_case_ ( self : List[str] ) -> Dict:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_A , _A = prepare_semantic_single_inputs()
_A = image_processing(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
_A = True
_A = image_processing(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
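# --- Editorial sketch (added for illustration; underscore-prefixed names are hypothetical) ---
# The tests above assert one invariant: after preprocessing, pixel_values and the
# label map share the same crop size, and labels stay integral in [0, 255]. A
# minimal numpy stand-in for the center-crop step (the real image processor also
# resizes and normalizes):
import numpy as np
def _center_crop(arr, size):
    h, w = arr.shape[-2:]
    th, tw = size
    top, left = (h - th) // 2, (w - tw) // 2
    return arr[..., top : top + th, left : left + tw]
_image = np.random.rand(3, 32, 48)            # channels-first image
_labels = np.zeros((32, 48), dtype=np.int64)  # one class id per pixel
assert _center_crop(_image, (16, 16)).shape == (3, 16, 16)
assert _center_crop(_labels, (16, 16)).shape == (16, 16)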
| 2 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Returns a mapping from utf-8 byte values to printable unicode strings, so that
    byte-level BPE can operate on visible characters instead of raw control bytes."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
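# Quick sanity notes (editorial addition): the byte table is a 256-entry bijection
# and get_pairs feeds the merge loop in bpe() below, e.g.:
#   len(bytes_to_unicode()) == 256
#   get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}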
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Applies byte-pair merges to a single pre-token, lowest-ranked merge first."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
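# --- Editorial sketch: a toy, standalone replay of the greedy merge loop in bpe()
# above (toy merge ranks assumed; not part of the original module). The lowest-
# ranked pair is merged first, so "hello" becomes "h e ll o" and then "h e llo".
def _toy_bpe(word, ranks):
    symbols = tuple(word)
    while True:
        candidates = [p for p in get_pairs(symbols) if p in ranks]
        if not candidates:
            break
        first, second = min(candidates, key=ranks.get)
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return " ".join(symbols)
assert _toy_bpe("hello", {("l", "l"): 0, ("ll", "o"): 1}) == "h e llo"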
| 500 | 0 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class __snake_case:
'''simple docstring'''
def __init__( self , A_ = None , A_ = None , A_=None , A_=None ) -> Any:
if not conversation_id:
            lowerCAmelCase = uuid.uuid4()
if past_user_inputs is None:
lowerCAmelCase = []
if generated_responses is None:
lowerCAmelCase = []
lowerCAmelCase = conversation_id
lowerCAmelCase = past_user_inputs
lowerCAmelCase = generated_responses
lowerCAmelCase = text
def __eq__( self , A_ ) -> int:
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __snake_case ( self , A_ , A_ = False ) -> List[Any]:
if self.new_user_input:
if overwrite:
                logger.warning(
                    f'User input added while an unprocessed input already exists: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".' )
                lowerCAmelCase = text
            else:
                logger.warning(
                    f'User input added while an unprocessed input already exists: the new input '
                    f'"{text}" was ignored. Set `overwrite` to True to overwrite the unprocessed user input.' )
else:
lowerCAmelCase = text
def __snake_case ( self ) -> Optional[Any]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase = None
def __snake_case ( self , A_ ) -> Tuple:
self.generated_responses.append(A_ )
def __snake_case ( self ) -> List[str]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> List[str]:
lowerCAmelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
lowerCAmelCase = """user""" if is_user else """bot"""
output += f'{name} >> {text} \n'
return output
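# Editorial usage sketch (upstream names assumed: the container class above is
# `Conversation` and the pipeline below is `ConversationalPipeline`):
#   conversation = Conversation("What's the best way to learn Python?")
#   conversation = conversational_pipeline(conversation)  # appends a bot response
#   conversation.add_user_input("And after the basics?")  # queues the next turn
#   conversation = conversational_pipeline(conversation)
# iter_texts() then yields (is_user, text) pairs in chronological order.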
@add_end_docstrings(
_lowerCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> int:
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase = self.tokenizer.eos_token
def __snake_case ( self , A_=None , A_=None , A_=None , **A_ ) -> Dict:
lowerCAmelCase = {}
lowerCAmelCase = {}
lowerCAmelCase = {}
if min_length_for_response is not None:
lowerCAmelCase = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ ) -> Dict:
lowerCAmelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def __snake_case ( self , A_ , A_=32 ) -> Dict[str, Any]:
if not isinstance(A_ , A_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
                """Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
lowerCAmelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
lowerCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __snake_case ( self , A_ , A_=10 , **A_ ) -> Tuple:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
lowerCAmelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
lowerCAmelCase = max_length - minimum_tokens
lowerCAmelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase = model_inputs["""attention_mask"""][:, -trim:]
lowerCAmelCase = model_inputs.pop("""conversation""" )
lowerCAmelCase = max_length
lowerCAmelCase = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
lowerCAmelCase = 1
else:
lowerCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __snake_case ( self , A_ , A_=True ) -> int:
lowerCAmelCase = model_outputs["""output_ids"""]
lowerCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
lowerCAmelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def __snake_case ( self , A_ ) -> Dict:
lowerCAmelCase = self.tokenizer.eos_token_id
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
lowerCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids | 344 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class __snake_case:
'''simple docstring'''
def __init__( self , A_ , A_ ) -> Tuple:
lowerCAmelCase = question_encoder
lowerCAmelCase = generator
lowerCAmelCase = self.question_encoder
def __snake_case ( self , A_ ) -> List[str]:
if os.path.isfile(A_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(A_ , exist_ok=A_ )
lowerCAmelCase = os.path.join(A_ , """question_encoder_tokenizer""" )
lowerCAmelCase = os.path.join(A_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(A_ )
self.generator.save_pretrained(A_ )
@classmethod
def __snake_case ( cls , A_ , **A_ ) -> Any:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCAmelCase = kwargs.pop("""config""" , A_ )
if config is None:
lowerCAmelCase = RagConfig.from_pretrained(A_ )
lowerCAmelCase = AutoTokenizer.from_pretrained(
A_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCAmelCase = AutoTokenizer.from_pretrained(
A_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=A_ , generator=A_ )
def __call__( self , *A_ , **A_ ) -> List[str]:
return self.current_tokenizer(*A_ , **A_ )
def __snake_case ( self , *A_ , **A_ ) -> Union[str, Any]:
return self.generator.batch_decode(*A_ , **A_ )
def __snake_case ( self , *A_ , **A_ ) -> str:
return self.generator.decode(*A_ , **A_ )
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.question_encoder
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.generator
def __snake_case ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = "longest" , A_ = None , A_ = True , **A_ , ) -> BatchEncoding:
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , A_ , )
if max_length is None:
lowerCAmelCase = self.current_tokenizer.model_max_length
lowerCAmelCase = self(
A_ , add_special_tokens=A_ , return_tensors=A_ , max_length=A_ , padding=A_ , truncation=A_ , **A_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCAmelCase = self.current_tokenizer.model_max_length
lowerCAmelCase = self(
text_target=A_ , add_special_tokens=A_ , return_tensors=A_ , padding=A_ , max_length=A_ , truncation=A_ , **A_ , )
lowerCAmelCase = labels["""input_ids"""]
return model_inputs | 344 | 1 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sorts arr by repeatedly extracting non-decreasing "strands" and merging them."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
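# Editorial note: each pass strips one non-decreasing "strand" ([4, 5], then [3],
# then [1, 2] for the first assert above) and merges it into the solution in
# linear time, giving O(n^2) comparisons in the worst case.
assert strand_sort([10, 5, 30, 22, 8, 1]) == [1, 5, 8, 10, 22, 30]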
| 205 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Stores a polynomial of the given degree; coefficients[i] multiplies x**i."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.')
        self.coefficients = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        """Adds two polynomials coefficient-wise."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        """Subtracts polynomial_a from self."""
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        """Negates every coefficient."""
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        """Multiplies two polynomials by convolving their coefficients."""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluates the polynomial at the given value."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        """Renders the polynomial from the highest power down, e.g. '3x^2 + 2x + 1'."""
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        """Returns the first derivative."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        """Returns the antiderivative with the given constant of integration."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
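# Quick usage sketch (toy values, editorial addition): p(x) = 3x^2 + 2x + 1
p = Polynomial(2, [1, 2, 3])
assert p.evaluate(2) == 17                      # 1 + 2*2 + 3*4
assert str(p.derivative()) == "6x + 2"          # d/dx (3x^2 + 2x + 1)
assert (p * Polynomial(1, [0, 1])).degree == 3  # multiplying by q(x) = x raises the degree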
| 205 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model."""
    default_checkpoint = """facebook/bart-large-mnli"""
    description = (
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    name = """text_classifier"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["""text""", ["""text"""]]
    outputs = ["""text"""]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail''' ):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [F'''This example is {label}''' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
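# Hedged usage sketch (downloads facebook/bart-large-mnli on first call, so it is
# kept commented out here):
#   tool = TextClassificationTool()
#   print(tool("This new restaurant exceeded all my expectations.", labels=["positive", "negative"]))
# The zero-shot trick: each candidate label is scored as the entailment probability
# of the NLI hypothesis "This example is {label}" against the input text.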
| 558 | """simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
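# Editorial note: the try/except guard above is the standard optional-dependency
# pattern: when the version check fails, the public pipeline names resolve to dummy
# objects that raise an informative error only when actually used.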
| 558 | 1 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Stores a polynomial of the given degree; coefficients[i] multiplies x**i."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''' )
        self.coefficients = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ''''''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a) | 39 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Returns every square a knight can reach from `position` on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """A tour is complete once no square is still marked 0."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: tries every knight move from `pos`, undoing dead ends."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Finds an open knight's tour on an n x n board, trying every starting square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
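# Editorial check (kept commented; the full backtracking search can take a moment):
# a 5 x 5 board admits an open knight's tour, while 3 x 3 and 4 x 4 do not, so
#   board = open_knight_tour(5)
#   assert sorted(v for row in board for v in row) == list(range(1, 26))
# succeeds and open_knight_tour(4) raises ValueError.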
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with an OOM error on GPU.
    # This makes JAX allocate exactly what is needed on demand and deallocate memory
    # that is no longer needed, but it will be slower, as stated here:
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
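# Editorial note (toy values): with pad_token_id = 1, np.where([[5, 7, 1]] != 1, 1, 0)
# yields [[1, 1, 0]], i.e. real tokens get attention 1 and padding gets 0.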
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.02 , ):
__snake_case : Dict = parent
__snake_case : str = batch_size
__snake_case : Optional[Any] = seq_length
__snake_case : Optional[int] = is_training
__snake_case : Optional[int] = use_labels
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : Any = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Dict = max_position_embeddings
__snake_case : Any = eos_token_id
__snake_case : Union[str, Any] = pad_token_id
__snake_case : int = bos_token_id
__snake_case : Union[str, Any] = initializer_range
def lowercase_ ( self ):
__snake_case : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__snake_case : Optional[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__snake_case : List[Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__snake_case : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
__snake_case : Optional[Any] = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def lowercase_ ( self ):
__snake_case , __snake_case : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = 20
__snake_case : Tuple = model_class_name(_UpperCAmelCase )
__snake_case : Dict = model.encode(inputs_dict['input_ids'] )
__snake_case , __snake_case : Optional[Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__snake_case : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__snake_case : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__snake_case : Tuple = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__snake_case : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Dict = model.decode(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = 20
__snake_case : List[str] = model_class_name(_UpperCAmelCase )
__snake_case : str = model.encode(inputs_dict['input_ids'] )
__snake_case , __snake_case : Any = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__snake_case : Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__snake_case : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__snake_case : str = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__snake_case : Dict = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Dict = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
__snake_case : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = 9_9
def lowercase_ ( self ):
__snake_case : int = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__snake_case : List[str] = input_ids.shape[0]
__snake_case : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self ):
__snake_case , __snake_case , __snake_case : int = self._get_config_and_data()
__snake_case : Tuple = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
__snake_case : Union[str, Any] = lm_model(input_ids=_UpperCAmelCase )
__snake_case : Optional[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__snake_case : List[Any] = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
__snake_case : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__snake_case : int = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__snake_case : List[str] = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__snake_case : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__snake_case : Any = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__snake_case : Any = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
__snake_case : List[Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase , UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = True
__UpperCAmelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCAmelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self ):
__snake_case : List[str] = FlaxBlenderbotModelTester(self )
def lowercase_ ( self ):
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : List[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Any = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
__snake_case : Any = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__snake_case : Optional[int] = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self ):
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : List[Any] = model_class(_UpperCAmelCase )
__snake_case : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__snake_case : int = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('JIT Enabled' ):
__snake_case : int = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__snake_case : Union[str, Any] = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ):
for model_class_name in self.all_model_classes:
__snake_case : Any = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__snake_case : List[str] = np.ones((1, 1) ) * model.config.eos_token_id
__snake_case : Optional[int] = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def lowercase_ ( self ):
__snake_case : Union[str, Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
__snake_case : Union[str, Any] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
__snake_case : List[str] = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_UpperCAmelCase )
__snake_case : List[str] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
__snake_case : str = ['Sam']
__snake_case : int = tokenizer(_UpperCAmelCase , return_tensors='jax' )
__snake_case : Tuple = model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : List[str] = 'Sam is a great name. It means "sun" in Gaelic.'
__snake_case : Optional[int] = tokenizer.batch_decode(_UpperCAmelCase , **_UpperCAmelCase )
assert generated_txt[0].strip() == tgt_text
| 679 | def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Converts a decimal (or numeric string) into a reduced numerator/denominator pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError('Please enter a valid number')
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split('.')[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce via Euclid's algorithm: iterate remainders until one vanishes
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
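# Worked example (editorial): 6.25 has two fractional digits, so it starts as
# 625/100; Euclid's loop finds gcd(100, 625) = 25 and the reduced pair is (25, 4).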
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates nested lists of random floats in [0, scale) with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class snake_case__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Tuple=4_00 , lowerCAmelCase_ : Any=20_00 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : Optional[Any]=1_60_00 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=80 , lowerCAmelCase_ : Dict=16 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : int="hann_window" , lowerCAmelCase_ : int=80 , lowerCAmelCase_ : List[str]=76_00 , lowerCAmelCase_ : Optional[int]=1e-10 , lowerCAmelCase_ : Optional[int]=True , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = min_seq_length
UpperCAmelCase_ = max_seq_length
UpperCAmelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase_ = feature_size
UpperCAmelCase_ = padding_value
UpperCAmelCase_ = sampling_rate
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = num_mel_bins
UpperCAmelCase_ = hop_length
UpperCAmelCase_ = win_length
UpperCAmelCase_ = win_function
UpperCAmelCase_ = fmin
UpperCAmelCase_ = fmax
UpperCAmelCase_ = mel_floor
UpperCAmelCase_ = return_attention_mask
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCamelCase ( self : Dict , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Any=False ) -> int:
def _flatten(lowerCAmelCase_ : Any ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
UpperCAmelCase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
def UpperCamelCase ( self : Any , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=False ) -> Optional[Any]:
if equal_length:
UpperCAmelCase_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
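# Hedged usage sketch (editorial; `SpeechTaFeatureExtractor` is the class imported
# above, `SpeechT5FeatureExtractor` upstream, and its default constructor is assumed):
#   fe = SpeechTaFeatureExtractor()
#   out = fe([0.5, -0.5, 0.25, -0.25] * 4_000, sampling_rate=16_000, return_tensors="np")
#   out.input_values.shape  # -> (1, 16000), zero-mean / unit-variance waveform
# Passing audio_target=... instead produces log-mel spectrogram targets.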
@require_torch
class snake_case__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
__A = SpeechTaFeatureExtractor
def UpperCamelCase ( self : Dict ) -> Any:
UpperCAmelCase_ = SpeechTaFeatureExtractionTester(self )
def UpperCamelCase ( self : Dict , lowerCAmelCase_ : int ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(_UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCamelCase ( self : Dict ) -> str:
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase_ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test batched
UpperCAmelCase_ = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
UpperCAmelCase_ = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
def UpperCamelCase ( self : Dict ) -> List[Any]:
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase_ = [None, 16_00, None]
for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='''np''' )
UpperCAmelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ = range(8_00 , 14_00 , 2_00 )
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase_ = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase_ = [None, 16_00, None]
for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = feat_extract(_UpperCAmelCase , max_length=_UpperCAmelCase , padding=_UpperCAmelCase )
UpperCAmelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def UpperCamelCase ( self : str ) -> Optional[Any]:
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10_00 , padding='''max_length''' , return_tensors='''np''' )
UpperCAmelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10_00 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=20_00 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 121 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq RoBERTa weights into our transformers structure.

    NOTE: the left-hand-side module paths below follow the Hugging Face
    XLM-RoBERTa-XL layout (word/position embeddings, query/key/value, LayerNorm).
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 663 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark any test without an explicit "integration"/"unit" marker as a unit test
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # silence SQLAlchemy 2.0 deprecation warnings raised through pandas
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
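# pytest discovers this conftest automatically; fixtures declared with autouse=True
# apply to every test in the suite without being requested by name, so the cache
# redirection above keeps test runs from touching the real HF cache directories.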
| 719 | import math
def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time by trial division over odd numbers."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
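# Example usage (illustrative only): find the first prime at or above 14,
# then search downwards from 20 using the "desc" keyword.
if __name__ == "__main__":
    print(is_prime(13))               # True
    print(next_prime(14))             # 17
    print(next_prime(20, desc=True))  # 19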
| 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
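# With the lazy-module pattern above, `from transformers.models.llama import LlamaModel`
# only triggers the torch-dependent import at first attribute access, which keeps a bare
# `import transformers` cheap even when heavy optional dependencies are installed.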
| 101 |
def sylvester(number: int) -> int:
    """Calculate the nth number in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
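# The sequence begins 2, 3, 7, 43, 1807, ...: each term is the product of all previous
# terms plus one, which `lower * upper + 1` computes via the equivalent recurrence
# a(n) = a(n-1)^2 - a(n-1) + 1.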
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 0 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 705 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably so a 5x5 table (25 letters) suffices
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right of each (wrapping around)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below each (wrapping around)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
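# A quick round-trip sketch (illustrative only; key and message are arbitrary):
if __name__ == "__main__":
    secret = encode("Hide the gold in the tree stump", "playfair example")
    print(secret)                              # ciphertext digraphs
    print(decode(secret, "playfair example"))  # upper-cased plaintext, possibly X-padded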
| 362 | 0 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects, plus an index map so that
    decrease_key can locate any node in O(1).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                # swap the nodes and keep the index map in sync
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod() | 249 |
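# Pancake sort: repeatedly flip the largest remaining element to the front, then
# flip it into its final position. Like selection sort it is O(n^2) comparisons,
# but it only ever rearranges the list by reversing prefixes.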
def pancake_sort(arr):
    """Sort an array by repeated prefix reversals."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 392 | 0 |
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
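# e.g. check_polygon([6, 10, 5]) -> True, while check_polygon([1, 2, 10]) -> False,
# because the longest side (10) is not shorter than the sum of the others (3).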
if __name__ == "__main__":
import doctest
doctest.testmod() | 705 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 19 | 0 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V share one GEMM:
    force the scale factors of the three projections to the max over (Q, K, V).
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized,
    implemented by clamping the amax of the following input quantizer.
    """

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where name contains a substring in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
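# Typical flow (sketch): call add_arguments(parser) and set_default_quantizers(args)
# before the model is created, run a few calibration batches between
# enable_calibration(model) and finish_calibration(model, args), then call
# configure_model(model, args) before training or evaluation.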
| 138 |
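# Naive substring search: slide the pattern over the text and compare character by
# character; worst case O(len(s) * len(pattern)) comparisons.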
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 220 | 0 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
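# Behavior sketch of shift_tokens_right (values illustrative):
#   input_ids = [[5, 6, 7]] with decoder_start_token_id=0 -> [[0, 5, 6]],
#   and any -100 label positions in the shifted result become pad_token_id.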
| 521 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 521 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A_ : Union[str, Any] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
A_ : Dict = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A_ : Any = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
A_ : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
A_ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A_ : Any = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
A_ : Dict = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
A_ : List[Any] = AutoModelWithLMHead.from_config(_lowerCamelCase )
model.resize_token_embeddings(len(_lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
A_ : List[Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
A_ : Optional[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
A_ : Tuple = (
get_dataset(_lowerCamelCase , tokenizer=_lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
A_ : Optional[Any] = (
get_dataset(_lowerCamelCase , tokenizer=_lowerCamelCase , evaluate=_lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
A_ : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=_lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
A_ : List[str] = DataCollatorForWholeWordMask(
tokenizer=_lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
A_ : Optional[Any] = DataCollatorForLanguageModeling(
tokenizer=_lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
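    # Note (descriptive comment, not part of the original script): XLNet is
    # pretrained with permutation language modeling, hence the dedicated
    # collator in the branch above; BERT-style models get whole-word or
    # per-token masking, and plain causal models (e.g. GPT-2) reuse the same
    # collator with mlm=False so labels are just the shifted inputs.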
# Initialize our Trainer
A_ : str = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , data_collator=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , prediction_loss_only=_lowerCamelCase , )
# Training
if training_args.do_train:
A_ : Union[str, Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A_ : Union[str, Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A_ : Any = trainer.evaluate()
A_ : Dict = math.exp(eval_output['''eval_loss'''] )
A_ : int = {"""perplexity""": perplexity}
A_ : List[str] = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _lowerCamelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_lowerCamelCase )
return results
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
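# Note: xla_spawn launches this entry point once per TPU core and passes the
# process index as the (unused) positional argument of the function above.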
if __name__ == "__main__":
main()
| 590 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MobileViTFeatureExtractor"""]
UpperCamelCase_ = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 384 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A ( a ):
__UpperCAmelCase : bool = field(default=a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
__UpperCAmelCase : bool = field(
default=a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
__UpperCAmelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = super().to_dict()
for k, v in d.items():
if isinstance(snake_case_ , snake_case_ ):
_a = v.to_dict()
return d
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
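# A minimal composition sketch (hedged: assumes the upstream public names
# `RagConfig` / `AutoConfig`; the checkpoint names below are illustrative):
#
#     qe_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     gen_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_cfg = RagConfig.from_question_encoder_generator_configs(qe_cfg, gen_cfg, n_docs=5)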
| 691 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = LxmertTokenizer
_UpperCAmelCase :Union[str, Any] = LxmertTokenizerFast
_UpperCAmelCase :Optional[int] = True
_UpperCAmelCase :Dict = True
def _snake_case ( self ):
super().setUp()
lowercase__: List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Dict = '''UNwant\u00E9d,running'''
lowercase__: Union[str, Any] = '''unwanted, running'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Union[str, Any] = self.tokenizer_class(self.vocab_file )
lowercase__: str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def _snake_case ( self ):
if not self.test_rust_tokenizer:
return
lowercase__: Optional[Any] = self.get_tokenizer()
lowercase__: Optional[int] = self.get_rust_tokenizer()
lowercase__: int = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = self.get_rust_tokenizer()
lowercase__: Union[str, Any] = tokenizer.encode(_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
| 586 | """simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__A = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
__A = 1_0
__A = 2_5_6
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[MinHash]:
if len(__UpperCAmelCase ) < MIN_NUM_TOKENS:
return None
lowercase__: Tuple = MinHash(num_perm=__UpperCAmelCase )
for token in set(__UpperCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Set[str]:
return {t for t in NON_ALPHA.split(__UpperCAmelCase ) if len(t.strip() ) > 0}
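# Example (a sketch): NON_ALPHA splits on anything outside [A-Za-z_0-9], so
#   get_tokens("def add(a, b): return a+b") == {"def", "add", "a", "b", "return"}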
class UpperCAmelCase :
"""simple docstring"""
    def __init__( self , *, _UpperCAmelCase = 0.85 , ):
lowercase__: Optional[int] = duplication_jaccard_threshold
lowercase__: str = NUM_PERM
lowercase__: Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowercase__: Optional[int] = defaultdict(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = self._index.query(_UpperCAmelCase )
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""" )
return
self._index.insert(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_UpperCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = []
for base, duplicates in self._duplicate_clusters.items():
lowercase__: Dict = [base] + list(_UpperCAmelCase )
# reformat the cluster to be a list of dict
lowercase__: Union[str, Any] = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(_UpperCAmelCase )
return duplicate_clusters
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: int = self.get_duplicate_clusters()
with open(_UpperCAmelCase , '''w''' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
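# A minimal usage sketch (hedged: illustrative only; `corpus` is a
# hypothetical list of code strings, and keys take the (index, repo_name,
# path) form used by the helpers below):
#
#     index = DuplicationIndex(duplication_jaccard_threshold=0.85)
#     for i, code in enumerate(corpus):
#         mh = get_min_hash([t for t in NON_ALPHA.split(code) if t.strip()])
#         if mh is not None:
#             index.add((i, "repo", "path"), mh)
#     clusters = index.get_duplicate_clusters()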
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
    index , data = element
lowercase__: Any = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__UpperCAmelCase , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowercase__: Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=__UpperCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__UpperCAmelCase ) ) , max_queue_size=1_0_0 ) ):
di.add(__UpperCAmelCase , __UpperCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCamelCase ) -> float:
    tokensa = get_tokens(__UpperCAmelCase )
    tokensb = get_tokens(__UpperCamelCase )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
__A = None
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
    extremes : Any = []
    for elementa in cluster:
        codea : List[str] = _shared_dataset[elementa['''base_index''']]['''content''']
        for elementb in extremes:
            codeb : Any = _shared_dataset[elementb['''base_index''']]['''content''']
            if jaccard_similarity(codea , codeb ) >= jaccard_threshold:
                elementb['''copies'''] += 1
                break
        else:
            elementa['''copies'''] = 1
            extremes.append(elementa )
return extremes
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
global _shared_dataset
lowercase__: Optional[int] = dataset
lowercase__: Union[str, Any] = []
lowercase__: str = partial(_find_cluster_extremes_shared , jaccard_threshold=__UpperCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__UpperCAmelCase , __UpperCAmelCase , ) , total=len(__UpperCAmelCase ) , ):
extremes_list.append(__UpperCAmelCase )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase = 0.8_5 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
lowercase__: Any = make_duplicate_clusters(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Union[str, Any] = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
lowercase__: List[str] = {}
lowercase__: int = find_extremes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
lowercase__: str = element
lowercase__: List[str] = duplicate_indices - set(extreme_dict.keys() )
lowercase__: List[str] = dataset.filter(lambda __UpperCAmelCase , __UpperCAmelCase : idx not in remove_indices , with_indices=__UpperCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase__: Optional[int] = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
lowercase__: Optional[int] = extreme_dict[element['''base_index''']]['''copies''']
print(F"""Original dataset size: {len(__UpperCAmelCase )}""" )
print(F"""Number of duplicate clusters: {len(__UpperCAmelCase )}""" )
print(F"""Files in duplicate cluster: {len(__UpperCAmelCase )}""" )
print(F"""Unique files in duplicate cluster: {len(__UpperCAmelCase )}""" )
print(F"""Filtered dataset size: {len(__UpperCAmelCase )}""" )
return ds_filter, duplicate_clusters
| 586 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase : int = logging.get_logger(__name__)
class __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''maskformer-swin'''
UpperCAmelCase_ : List[str] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __UpperCAmelCase=2_24 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Any:
super().__init__(**__UpperCAmelCase )
A : Tuple = image_size
A : List[str] = patch_size
A : Optional[int] = num_channels
A : Tuple = embed_dim
A : Optional[Any] = depths
A : Any = len(__UpperCAmelCase )
A : Union[str, Any] = num_heads
A : Any = window_size
A : Optional[Any] = mlp_ratio
A : List[str] = qkv_bias
A : Any = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : List[str] = drop_path_rate
A : Dict = hidden_act
A : int = use_absolute_embeddings
A : Any = layer_norm_eps
A : str = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A : Optional[int] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
A : Tuple = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
A : Optional[int] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
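# A minimal usage sketch (hedged: assumes the class is exposed upstream as
# `MaskFormerSwinConfig`; the values below are the defaults of __init__ above):
#
#     config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#     # the channel dimension computed above is exposed as hidden_size:
#     # 768 == 96 * 2 ** (len(depths) - 1)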
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : Tuple = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 423 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = DiTPipeline
a_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
a_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
a_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
a_ = False
def lowercase ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = TransformeraDModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCAmelCase_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=lowerCAmelCase_ , )
__lowerCAmelCase = AutoencoderKL()
__lowerCAmelCase = DDIMScheduler()
__lowerCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowercase ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple=0 ) -> int:
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = 'cpu'
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
__lowerCAmelCase = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
__lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_ , 1e-3 )
def lowercase ( self : List[Any] ) -> Tuple:
self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase_ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase ( self : List[str] ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Optional[Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase = pipe.get_label_ids(lowerCAmelCase_ )
__lowerCAmelCase = pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=4_0 , output_type='np' ).images
for word, image in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase ( self : int ) -> int:
__lowerCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase = ['vase', 'umbrella']
__lowerCAmelCase = pipe.get_label_ids(lowerCAmelCase_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2_5 , output_type='np' ).images
for word, image in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 53 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case : str = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=A_ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=A_ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=A_ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=A_ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase_ = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(A_ )} examples to process.""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 10_000
UpperCAmelCase_ = time.time()
for text in data:
UpperCAmelCase_ = F"""{bos} {text.strip()} {sep}"""
UpperCAmelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
rslt.append(A_ )
iter += 1
if iter % interval == 0:
UpperCAmelCase_ = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(A_ )} examples processed.""" )
UpperCAmelCase_ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase_ = [np.uintaa(A_ ) for d in rslt]
else:
UpperCAmelCase_ = [np.intaa(A_ ) for d in rslt]
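    # Note: token ids are stored as uint16 whenever the vocabulary fits in
    # 2**16 entries (e.g. BERT's ~30k vocab), halving the size of the pickled
    # dump; larger vocabularies fall back to int32.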
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(A_ , "wb" ) as handle:
pickle.dump(rslt_ , A_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 660 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
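# Worked example: the closed form above is S = n/2 * (2a + (n - 1)d), so the
# call in main() below, sum_of_series(1, 1, 10), evaluates to
# 10/2 * (2*1 + 9*1) = 5 * 11 = 55.0.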
def __UpperCAmelCase ( ):
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 523 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Tuple = len(__UpperCamelCase )
for _ in range(__UpperCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
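# Worked example (a sketch): for arr == [3, 1, 2] the even pass (offset 0)
# compares indices (0, 1) and swaps to [1, 3, 2]; the odd pass (offset 1)
# compares (1, 2) and swaps to [1, 2, 3]. Alternating the parity over n
# passes guarantees a sorted result.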
if __name__ == "__main__":
a_ = list(range(1_0, 0, -1))
print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 523 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 100_0000 ) ->int:
'''simple docstring'''
a : Any = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _lowercase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
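# Worked example (a sketch): for limit == 8 the sieve leaves
# phi(2..8) == (1, 2, 2, 4, 2, 6, 4), so solution(8) returns 21 -- the number
# of reduced proper fractions with denominator <= 8 (Project Euler 72).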
if __name__ == "__main__":
print(solution())
| 633 |
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : int = 0 , _lowercase : int = 0 ) ->list:
'''simple docstring'''
a : Optional[Any] = end or len(_lowercase )
for i in range(_lowercase , _lowercase ):
a : List[str] = i
a : Any = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
a : Optional[Any] = array[temp_index - 1]
temp_index -= 1
a : Any = temp_index_value
return array
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : int , _lowercase : int ) ->None: # Max Heap
'''simple docstring'''
a : Tuple = index
a : List[Any] = 2 * index + 1 # Left Node
a : Dict = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
a : Dict = left_index
if right_index < heap_size and array[largest] < array[right_index]:
a : int = right_index
if largest != index:
        array[index], array[largest] = array[largest], array[index]
heapify(_lowercase , _lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : list ) ->list:
'''simple docstring'''
a : int = len(_lowercase )
for i in range(n // 2 , -1 , -1 ):
heapify(_lowercase , _lowercase , _lowercase )
for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
heapify(_lowercase , 0 , _lowercase )
return array
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : int , _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
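# Example (a sketch): for array == [5, 1, 9] with indices (0, 1, 2) the
# comparisons above return array[0] == 5, the median of {1, 5, 9}; using the
# median of three as the quicksort pivot avoids the quadratic worst case on
# already sorted input.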
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : int , _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
a : List[Any] = low
a : int = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
        array[i], array[j] = array[j], array[i]
i += 1
def _SCREAMING_SNAKE_CASE ( _lowercase : list ) ->list:
'''simple docstring'''
if len(_lowercase ) == 0:
return array
a : Tuple = 2 * math.ceil(math.loga(len(_lowercase ) ) )
a : List[str] = 16
return intro_sort(_lowercase , 0 , len(_lowercase ) , _lowercase , _lowercase )
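# Design note (as implemented above): the recursion depth is capped at
# 2 * ceil(log2(n)); once the cap is exhausted a partition falls back to heap
# sort, and partitions smaller than size_threshold (16) are finished with a
# final insertion-sort pass -- the classic introsort strategy.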
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : int , _lowercase : int , _lowercase : int , _lowercase : int ) ->list:
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_lowercase )
max_depth -= 1
a : List[str] = median_of_a(_lowercase , _lowercase , start + ((end - start) // 2) + 1 , end - 1 )
a : Tuple = partition(_lowercase , _lowercase , _lowercase , _lowercase )
intro_sort(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
a : Union[str, Any] = p
return insertion_sort(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Optional[int] = input('''Enter numbers separated by a comma : ''').strip()
a : List[str] = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 633 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
"""simple docstring"""
def __init__( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE=13 ,__SCREAMING_SNAKE_CASE=30 ,__SCREAMING_SNAKE_CASE=2 ,__SCREAMING_SNAKE_CASE=3 ,__SCREAMING_SNAKE_CASE=True ,__SCREAMING_SNAKE_CASE=True ,__SCREAMING_SNAKE_CASE=32 ,__SCREAMING_SNAKE_CASE=5 ,__SCREAMING_SNAKE_CASE=4 ,__SCREAMING_SNAKE_CASE=37 ,__SCREAMING_SNAKE_CASE="gelu" ,__SCREAMING_SNAKE_CASE=0.1 ,__SCREAMING_SNAKE_CASE=0.1 ,__SCREAMING_SNAKE_CASE=10 ,__SCREAMING_SNAKE_CASE=0.02 ,__SCREAMING_SNAKE_CASE=None ,):
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Tuple = batch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : str = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Any = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Dict = num_patches + 1
def __a ( self ):
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, labels
def __a ( self ):
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = ViTMSNModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Any = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[int] = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(f"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __a ( self ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
SCREAMING_SNAKE_CASE : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
A = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
A = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
A = False
A = False
A = False
A = False
def __a ( self ):
SCREAMING_SNAKE_CASE : str = ViTMSNModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,has_text_modality=__SCREAMING_SNAKE_CASE ,hidden_size=37 )
def __a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __a ( self ):
pass
def __a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE ,nn.Linear ) )
def __a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__SCREAMING_SNAKE_CASE )
def __a ( self ):
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __a ( self ):
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __a ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = ViTMSNModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self ):
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __a ( self ):
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : str = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors='pt' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
| 220 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('KT')
__UpperCAmelCase = TypeVar('VT')
class _a ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self ,__SCREAMING_SNAKE_CASE = "root" ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Optional[Any] = key
SCREAMING_SNAKE_CASE : Optional[Any] = value
SCREAMING_SNAKE_CASE : list[Node[KT, VT]] = []
def __repr__( self ):
return f"""Node({self.key}: {self.value})"""
@property
def __a ( self ):
return len(self.forward )
class _a ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self ,__SCREAMING_SNAKE_CASE = 0.5 ,__SCREAMING_SNAKE_CASE = 16 ):
SCREAMING_SNAKE_CASE : Node[KT, VT] = Node[KT, VT]()
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Optional[Any] = p
SCREAMING_SNAKE_CASE : Dict = max_level
def __str__( self ):
SCREAMING_SNAKE_CASE : Union[str, Any] = list(self )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return f"""SkipList(level={self.level})"""
SCREAMING_SNAKE_CASE : Optional[Any] = max((len(str(__SCREAMING_SNAKE_CASE ) ) for item in items) ,default=4 )
SCREAMING_SNAKE_CASE : List[str] = max(__SCREAMING_SNAKE_CASE ,4 ) + 4
SCREAMING_SNAKE_CASE : List[Any] = self.head
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = node.forward.copy()
lines.append(f"""[{node.key}]""".ljust(__SCREAMING_SNAKE_CASE ,'-' ) + '* ' * len(__SCREAMING_SNAKE_CASE ) )
lines.append(' ' * label_size + '| ' * len(__SCREAMING_SNAKE_CASE ) )
while len(node.forward ) != 0:
SCREAMING_SNAKE_CASE : str = node.forward[0]
lines.append(
f"""[{node.key}]""".ljust(__SCREAMING_SNAKE_CASE ,'-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : Optional[Any] = node.forward
lines.append('None'.ljust(__SCREAMING_SNAKE_CASE ) + '* ' * len(__SCREAMING_SNAKE_CASE ) )
return f"""SkipList(level={self.level})\n""" + "\n".join(__SCREAMING_SNAKE_CASE )
def __iter__( self ):
SCREAMING_SNAKE_CASE : Tuple = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
SCREAMING_SNAKE_CASE : Any = node.forward[0]
def __a ( self ):
SCREAMING_SNAKE_CASE : Dict = 1
while random() < self.p and level < self.max_level:
level += 1
return level
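    # Note on the distribution: a node reaches level k with probability
    # p**(k - 1) * (1 - p) (capped at max_level), so for p == 0.5 the expected
    # level is 1 / (1 - p) == 2 and roughly half the nodes survive to each
    # successive level -- this is what yields O(log n) expected search time.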
def __a ( self ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : str = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
SCREAMING_SNAKE_CASE : Dict = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__SCREAMING_SNAKE_CASE )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __a ( self ,__SCREAMING_SNAKE_CASE ):
        node , update_vector = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
for i, update_node in enumerate(__SCREAMING_SNAKE_CASE ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
SCREAMING_SNAKE_CASE : Dict = node.forward[i]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = update_node.forward[:i]
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
        node , update_vector = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 ,__SCREAMING_SNAKE_CASE ):
update_vector.append(self.head )
SCREAMING_SNAKE_CASE : str = level
SCREAMING_SNAKE_CASE : List[str] = Node(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : List[Any] = new_node
def __a ( self ,__SCREAMING_SNAKE_CASE ):
        node , update_vector = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
return node.value
return None
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Any = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
SCREAMING_SNAKE_CASE : int = skip_list.head
SCREAMING_SNAKE_CASE : List[str] = {}
while node.level != 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = node.forward[0]
SCREAMING_SNAKE_CASE : List[str] = node.value
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
SCREAMING_SNAKE_CASE : List[str] = skip_list.head
SCREAMING_SNAKE_CASE : str = {}
while node.level != 0:
SCREAMING_SNAKE_CASE : int = node.forward[0]
SCREAMING_SNAKE_CASE : Any = node.value
if len(snake_case_ ) != 4:
print()
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def SCREAMING_SNAKE_CASE_ ( ) -> str:
SCREAMING_SNAKE_CASE : Tuple = SkipList()
assert skip_list.find('Some key' ) is None
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Dict = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
SCREAMING_SNAKE_CASE : int = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
SCREAMING_SNAKE_CASE : Optional[int] = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(snake_case_ : Optional[int] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(snake_case_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
def is_sorted(snake_case_ : int ):
return all(next_item >= item for item, next_item in zip(snake_case_ , lst[1:] ) )
SCREAMING_SNAKE_CASE : Optional[int] = SkipList()
for i in range(10 ):
skip_list.insert(snake_case_ , snake_case_ )
assert is_sorted(list(snake_case_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(snake_case_ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(snake_case_ ) )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : List[str] = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 220 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 590 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
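# Expected demo output (worked by hand from the test data above, assuming the
# classes are correct): merging the two tuples yields
#     -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10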
| 590 | 1 |
"""Find all bridges (cut edges) in an undirected graph."""
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
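# Illustrative usage (my own example, not part of the original module): the demo
# graph at index 0 is two cycles joined by a chain, so its bridges are the chain
# edges {(2, 3), (3, 4), (2, 5)} (the order of the returned list may vary):
#     print(compute_bridges(get_demo_graph(0)))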
| 474 |
'''simple docstring'''
def solution():
    """Returns the product a*b*c of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
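# Sanity check (a known fact about Project Euler problem 9, not from this file):
# the unique Pythagorean triplet with a + b + c = 1000 is (200, 375, 425), so
# solution() == 200 * 375 * 425 == 31875000.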
| 474 | 1 |
from copy import deepcopy
class BinaryIndexedTree:
    """Fenwick tree supporting point updates and prefix/range-sum queries."""

    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
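# Minimal usage sketch (my own example, not from the original module):
#     bit = BinaryIndexedTree([1, 2, 3, 4, 5])
#     bit.add(0, 10)          # point update: arr[0] += 10
#     print(bit.prefix(3))    # sum of arr[0:3] -> 16
#     print(bit.query(1, 4))  # sum of arr[1:4] -> 9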
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 322 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
UpperCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCamelCase__ = model.state_dict()
UpperCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCamelCase__ = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
UpperCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
UpperCamelCase__ = state_dict['cls.predictions.decoder.weight']
UpperCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[F"""cls.predictions.transform.dense.{w}"""]
UpperCamelCase__ = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
    torch.save(compressed_sd, args.dump_checkpoint)
| 322 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
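# Usage sketch (my own example): build a config for a 1.4x-width MobileNetV2.
#     config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#     assert config.model_type == "mobilenet_v2"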
| 717 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 328 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
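# Example invocation (a sketch; the script filename is an assumption):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc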
| 328 | 1 |
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sorts a list of non-negative integers in place, least significant digit first."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
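# Example (my own, not part of the original module):
#     radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#     # -> [2, 24, 45, 66, 75, 90, 170, 802]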
if __name__ == "__main__":
import doctest
doctest.testmod()
| 601 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
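# Usage sketch (my own example; mirrors the documented DPR reader workflow, with
# the checkpoint name taken from the pretrained maps above):
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )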
| 601 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
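# To run this suite locally (an assumption about the repository layout):
#   python -m pytest tests/models/esm/test_modeling_tf_esm.py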
| 10 |
"""
Training the distilled model.
Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2.
"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified "
                    "whether to overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
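# A hypothetical invocation of this distillation script, using only the flags
# actually referenced above (args.student_type, args.student_config,
# args.teacher_type, args.teacher_name, args.data_file, args.token_counts,
# args.dump_path, args.mlm, args.force):
#
#   python train.py \
#       --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --data_file data/binarized.pickle --token_counts data/token_counts.pickle \
#       --dump_path serialization_dir/my_distillation --mlm --force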
| 489 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 1_024,
}
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : Dict = collections.OrderedDict()
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as reader:
lowercase__ : Union[str, Any] = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
lowercase__ : List[Any] = token.rstrip('''\n''' )
lowercase__ : Union[str, Any] = index
return vocab
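# A minimal sketch of the vocab.txt layout the loader above expects: one token
# per line, with the line index becoming the token id. Assuming the
# (obfuscated) loader above kept its original name `load_vocab`:
#
#   with open("vocab.txt", "w", encoding="utf-8") as f:
#       f.write("<pad>\n<unk>\n<s>\n</s>\nhello\n")
#   vocab = load_vocab("vocab.txt")
#   assert vocab["hello"] == 4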
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : str ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[Any]=200 ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = vocab
lowercase__ : Optional[Any] = unk_token
lowercase__ : int = max_input_chars_per_word
def UpperCAmelCase ( self : Dict ,_snake_case : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = list(_snake_case )
if len(_snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase__ : List[str] = 0
lowercase__ : List[str] = []
while start < len(_snake_case ):
lowercase__ : str = len(_snake_case )
lowercase__ : Union[str, Any] = None
while start < end:
lowercase__ : Optional[int] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase__ : Optional[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_snake_case )
lowercase__ : int = end
return sub_tokens
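# Worked example of the greedy longest-match-first loop above: with a vocab
# containing {"un", "happy"}, tokenizing "unhappy" first shrinks `end` until
# the prefix "un" hits the vocab, then matches "happy", yielding
# ["un", "happy"]; a character with no match at all is emitted as a single
# unk_token and `start` advances by one.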
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Any = VOCAB_FILES_NAMES
lowerCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : str = ["input_ids", "attention_mask"]
lowerCAmelCase : Optional[Any] = False
def __init__( self : Optional[int] ,_snake_case : Any ,_snake_case : List[str]="<d>" ,_snake_case : Dict="</d>" ,_snake_case : Dict="<s>" ,_snake_case : Optional[int]="</s>" ,_snake_case : Dict="<pad>" ,_snake_case : Tuple="<unk>" ,_snake_case : Union[str, Any]="</n>" ,_snake_case : Optional[Any]="</_>" ,_snake_case : Union[str, Any]="left" ,**_snake_case : List[str] ,) -> Dict:
"""simple docstring"""
requires_backends(self ,['''jieba'''] )
super().__init__(
bod_token=_snake_case ,eod_token=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,pad_token=_snake_case ,unk_token=_snake_case ,line_token=_snake_case ,space_token=_snake_case ,padding_side=_snake_case ,**_snake_case ,)
lowercase__ : int = bod_token
lowercase__ : str = eod_token
lowercase__ : List[Any] = load_vocab(_snake_case )
lowercase__ : int = self.encoder[space_token]
lowercase__ : str = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase__ : Any = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda _snake_case : x[1] ) )
lowercase__ : Any = {v: k for k, v in self.encoder.items()}
lowercase__ : str = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token )
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return len(self.encoder )
def UpperCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = []
for x in jieba.cut(_snake_case ,cut_all=_snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_snake_case ) )
return output_tokens
def UpperCAmelCase ( self : Dict ,_snake_case : List[str] ,**_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = [i for i in token_ids if i >= 0]
lowercase__ : List[str] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
return token in self.encoder
def UpperCAmelCase ( self : List[str] ,_snake_case : List[str] ) -> str:
"""simple docstring"""
return "".join(_snake_case )
def UpperCAmelCase ( self : str ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder.get(_snake_case ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self : List[Any] ,_snake_case : Any ) -> str:
"""simple docstring"""
return self.decoder.get(_snake_case ,self.unk_token )
def UpperCAmelCase ( self : List[str] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_snake_case ):
lowercase__ : Union[str, Any] = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase__ : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase__ : List[Any] = 0
if " " in self.encoder:
lowercase__ : Optional[int] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase__ : int = self.encoder['''\n''']
del self.encoder["\n"]
lowercase__ : Dict = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda _snake_case : x[1] ) )
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase__ : str = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase ( self : List[str] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case ))
| 718 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __A :
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ,_snake_case : str=13 ,_snake_case : int=64 ,_snake_case : Dict=2 ,_snake_case : int=3 ,_snake_case : Optional[Any]=True ,_snake_case : List[str]=True ,_snake_case : Dict=32 ,_snake_case : int=5 ,_snake_case : Any=4 ,_snake_case : Optional[int]=37 ,_snake_case : Dict="gelu" ,_snake_case : Union[str, Any]=0.1 ,_snake_case : List[Any]=0.1 ,_snake_case : int=10 ,_snake_case : Any=0.02 ,_snake_case : List[str]=[1, 16, 4, 4] ,_snake_case : str=None ,) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[int] = parent
lowercase__ : Tuple = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : Dict = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[int] = use_labels
lowercase__ : Dict = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : str = type_sequence_label_size
lowercase__ : Tuple = initializer_range
lowercase__ : Union[str, Any] = scope
lowercase__ : Optional[Any] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowercase__ : List[str] = (self.image_size // 32) ** 2
lowercase__ : List[str] = num_patches + 1
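# e.g. with the defaults above (image_size=64): (64 // 32) ** 2 = 4 patches,
# so seq_length = 4 + 1 = 5 once the [CLS] token is counted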
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ : str = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,backbone_featmap_shape=self.backbone_featmap_shape ,backbone_config=_snake_case ,)
def UpperCAmelCase ( self : int ,_snake_case : Dict ,_snake_case : str ,_snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Dict ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = self.type_sequence_label_size
lowercase__ : str = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : List[Any] = config_and_inputs
lowercase__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCAmelCase : Optional[int] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : Optional[int] = False
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ : str = ViTHybridModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(_snake_case )
lowercase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowercase__ : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : Any = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[int] = model(**_snake_case )
# verify the logits
lowercase__ : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : str = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
@slow
@require_accelerate
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
lowercase__ : Dict = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' ,device_map='''auto''' )
lowercase__ : Optional[int] = prepare_img()
lowercase__ : List[str] = image_processor(images=_snake_case ,return_tensors='''pt''' )
lowercase__ : Union[str, Any] = model(**_snake_case )
lowercase__ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowercase__ : List[str] = logits.argmax(-1 ).item()
# assertEqual, not assertTrue: assertTrue's second argument is only a failure message
self.assertEqual(model.config.idalabel[predicted_class_idx] ,'''tabby, tabby cat''' )
| 122 | 0 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
def get_masked_lm_array(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__SCREAMING_SNAKE_CASE = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
if "kernel" in name:
__SCREAMING_SNAKE_CASE = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
def get_encoder_array(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__SCREAMING_SNAKE_CASE = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
if "kernel" in name:
__SCREAMING_SNAKE_CASE = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
def get_encoder_layer_array(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__SCREAMING_SNAKE_CASE = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
if "kernel" in name:
__SCREAMING_SNAKE_CASE = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
def get_encoder_attention_layer_array(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__SCREAMING_SNAKE_CASE = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = array.reshape(lowerCAmelCase_ )
if "kernel" in name:
__SCREAMING_SNAKE_CASE = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
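# Note on the transposes in the helpers above: TF/Keras Dense kernels are
# stored as (in_features, out_features), whereas torch.nn.Linear.weight is
# (out_features, in_features), so every "kernel" array is transposed before
# being wrapped in a torch tensor.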
print(f"""Loading model based on config from {config_path}...""" )
__SCREAMING_SNAKE_CASE = BertConfig.from_json_file(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = BertForMaskedLM(lowerCAmelCase_ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__SCREAMING_SNAKE_CASE = model.bert.encoder.layer[layer_index]
# Self-attention
__SCREAMING_SNAKE_CASE = layer.attention.self
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_query_dense/kernel" , self_attn.query.weight.data.shape )
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_query_dense/bias" , self_attn.query.bias.data.shape )
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_key_dense/kernel" , self_attn.key.weight.data.shape )
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_key_dense/bias" , self_attn.key.bias.data.shape )
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_value_dense/kernel" , self_attn.value.weight.data.shape )
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_value_dense/bias" , self_attn.value.bias.data.shape )
# Self-attention Output
__SCREAMING_SNAKE_CASE = layer.attention.output
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_output_dense/kernel" , self_output.dense.weight.data.shape )
__SCREAMING_SNAKE_CASE = get_encoder_attention_layer_array(
lowerCAmelCase_ , "_output_dense/bias" , self_output.dense.bias.data.shape )
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_attention_layer_norm/gamma" )
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_attention_layer_norm/beta" )
# Intermediate
__SCREAMING_SNAKE_CASE = layer.intermediate
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_intermediate_dense/kernel" )
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_intermediate_dense/bias" )
# Output
__SCREAMING_SNAKE_CASE = layer.output
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_output_dense/kernel" )
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_output_dense/bias" )
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_output_layer_norm/gamma" )
__SCREAMING_SNAKE_CASE = get_encoder_layer_array(lowerCAmelCase_ , "_output_layer_norm/beta" )
# Embeddings
__SCREAMING_SNAKE_CASE = get_encoder_array("_position_embedding_layer/embeddings" )
__SCREAMING_SNAKE_CASE = get_encoder_array("_type_embedding_layer/embeddings" )
__SCREAMING_SNAKE_CASE = get_encoder_array("_embedding_norm_layer/gamma" )
__SCREAMING_SNAKE_CASE = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
__SCREAMING_SNAKE_CASE = model.cls.predictions.transform
__SCREAMING_SNAKE_CASE = get_masked_lm_array("dense/kernel" )
__SCREAMING_SNAKE_CASE = get_masked_lm_array("dense/bias" )
__SCREAMING_SNAKE_CASE = get_masked_lm_array("layer_norm/gamma" )
__SCREAMING_SNAKE_CASE = get_masked_lm_array("layer_norm/beta" )
__SCREAMING_SNAKE_CASE = get_masked_lm_array("embedding_table" )
# Pooling
__SCREAMING_SNAKE_CASE = BertPooler(config=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = get_encoder_array("_pooler_layer/kernel" )
__SCREAMING_SNAKE_CASE = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(lowerCAmelCase_ )
# Integration test - should load without any errors ;)
__SCREAMING_SNAKE_CASE = BertForMaskedLM.from_pretrained(lowerCAmelCase_ )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
a__ : Tuple = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 682 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
a__ : Dict = logging.get_logger(__name__)
# General docstring
a__ : str = '''RegNetConfig'''
# Base docstring
a__ : List[str] = '''facebook/regnet-y-040'''
a__ : int = [1, 1_0_8_8, 7, 7]
# Image classification docstring
a__ : int = '''facebook/regnet-y-040'''
a__ : str = '''tabby, tabby cat'''
a__ : Optional[Any] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Tuple , ) -> Any:
super().__init__(**UpperCAmelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__SCREAMING_SNAKE_CASE = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD(
filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding="VALID" , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" , )
__SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
__SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.convolution(self.padding(UpperCAmelCase__ ) )
__SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ )
return hidden_state
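# The ZeroPadding2D(kernel_size // 2) + "VALID" convolution pair above
# emulates "SAME" padding for odd kernel sizes with explicit, PyTorch-style
# padding, which keeps TF and PT checkpoints numerically aligned (see the
# colab notebook referenced in the constructor comment).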
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = config.num_channels
__SCREAMING_SNAKE_CASE = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = shape_list(UpperCAmelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : int ) -> str:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD(
filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" )
__SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ )
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : int ) -> Tuple:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" )
__SCREAMING_SNAKE_CASE = [
tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] ) -> Any:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ )
for layer_module in self.attention:
__SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = hidden_state * pooled
return hidden_state
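# Squeeze-and-excitation as implemented above: global average pooling
# squeezes (batch, H, W, C) down to (batch, 1, 1, C); two 1x1 convolutions
# (ReLU then sigmoid) turn that into per-channel gates in [0, 1], which are
# broadcast-multiplied back onto the input feature map.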
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : int ) -> str:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
__SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width )
__SCREAMING_SNAKE_CASE = (
TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__SCREAMING_SNAKE_CASE = [
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.2" ),
]
__SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : str ) -> Any:
__SCREAMING_SNAKE_CASE = hidden_state
for layer_module in self.layers:
__SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ )
hidden_state += residual
__SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[Any] ) -> Any:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
__SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width )
__SCREAMING_SNAKE_CASE = (
TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__SCREAMING_SNAKE_CASE = [
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.3" ),
]
__SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> List[Any]:
__SCREAMING_SNAKE_CASE = hidden_state
for layer_module in self.layers:
__SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ )
hidden_state += residual
__SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__SCREAMING_SNAKE_CASE = [
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name="layers.0" ),
*[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int ) -> int:
for layer_module in self.layers:
__SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Any ) -> List[str]:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"""stages.{i+1}""" ) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ) -> TFBaseModelOutputWithNoAttention:
__SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
__SCREAMING_SNAKE_CASE = stage_module(UpperCAmelCase__ )
if output_hidden_states:
__SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ )
@keras_serializable
class UpperCamelCase_ ( tf.keras.layers.Layer):
"""simple docstring"""
snake_case__ : Any = RegNetConfig
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Tuple:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = TFRegNetEmbeddings(UpperCAmelCase__ , name="embedder" )
__SCREAMING_SNAKE_CASE = TFRegNetEncoder(UpperCAmelCase__ , name="encoder" )
__SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" )
@unpack_inputs
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
__SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.encoder(
UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = encoder_outputs[0]
__SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ )
# Change to NCHW output format to have uniformity in the modules
__SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
__SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__SCREAMING_SNAKE_CASE = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : List[Any] = RegNetConfig
snake_case__ : List[str] = "regnet"
snake_case__ : str = "pixel_values"
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
a__ : Union[str, Any] = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
a__ : Optional[int] = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Tuple:
super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
__SCREAMING_SNAKE_CASE = self.regnet(
pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> Any:
super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = config.num_labels
__SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" )
# classification head
__SCREAMING_SNAKE_CASE = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
__SCREAMING_SNAKE_CASE = self.regnet(
UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
__SCREAMING_SNAKE_CASE = self.classifier[0](UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.classifier[1](UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ )
if not return_dict:
__SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
| 682 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : List[Any] = '''xlm'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , snake_case=3_0145 , snake_case=2048 , snake_case=12 , snake_case=16 , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=1 , snake_case=True , snake_case=512 , snake_case=2048**-0.5 , snake_case=1e-1_2 , snake_case=0.02 , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=3 , snake_case=5 , snake_case=True , snake_case="first" , snake_case=True , snake_case=None , snake_case=True , snake_case=0.1 , snake_case=5 , snake_case=5 , snake_case=0 , snake_case=0 , snake_case=2 , snake_case=0 , **snake_case , ):
snake_case_ = vocab_size
snake_case_ = emb_dim
snake_case_ = n_layers
snake_case_ = n_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = gelu_activation
snake_case_ = sinusoidal_embeddings
snake_case_ = causal
snake_case_ = asm
snake_case_ = n_langs
snake_case_ = use_lang_emb
snake_case_ = layer_norm_eps
snake_case_ = bos_index
snake_case_ = eos_index
snake_case_ = pad_index
snake_case_ = unk_index
snake_case_ = mask_index
snake_case_ = is_encoder
snake_case_ = max_position_embeddings
snake_case_ = embed_init_std
snake_case_ = init_std
snake_case_ = summary_type
snake_case_ = summary_use_proj
snake_case_ = summary_activation
snake_case_ = summary_proj_to_labels
snake_case_ = summary_first_dropout
snake_case_ = start_n_top
snake_case_ = end_n_top
snake_case_ = mask_token_id
snake_case_ = lang_id
if "n_words" in kwargs:
snake_case_ = kwargs['n_words']
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , **snake_case )
class lowercase ( lowercase_ ):
@property
def a ( self ):
if self.task == "multiple-choice":
snake_case_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
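# A hypothetical usage sketch for the two classes above (stand-ins for
# transformers' XLMConfig and XLMOnnxConfig); the ONNX config exposes the
# dynamic input axes the exporter needs:
#
#   config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
#   onnx_config = XLMOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes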
| 715 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''blip_2_vision_model'''
def __init__( self , snake_case=1408 , snake_case=6144 , snake_case=39 , snake_case=16 , snake_case=224 , snake_case=14 , snake_case="gelu" , snake_case=0.0_00_01 , snake_case=0.0 , snake_case=1e-1_0 , snake_case=True , **snake_case , ):
super().__init__(**snake_case )
snake_case_ = hidden_size
snake_case_ = intermediate_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = patch_size
snake_case_ = image_size
snake_case_ = initializer_range
snake_case_ = attention_dropout
snake_case_ = layer_norm_eps
snake_case_ = hidden_act
snake_case_ = qkv_bias
@classmethod
def a ( cls , snake_case , **snake_case ):
cls._set_token_in_kwargs(snake_case )
snake_case_ , snake_case_ = cls.get_config_dict(snake_case , **snake_case )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
snake_case_ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case , **snake_case )
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Any = '''blip_2_qformer'''
def __init__( self , snake_case=3_0522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=0.02 , snake_case=1e-1_2 , snake_case=0 , snake_case="absolute" , snake_case=2 , snake_case=1408 , **snake_case , ):
super().__init__(pad_token_id=snake_case , **snake_case )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = cross_attention_frequency
snake_case_ = encoder_hidden_size
@classmethod
def a ( cls , snake_case , **snake_case ):
cls._set_token_in_kwargs(snake_case )
snake_case_ , snake_case_ = cls.get_config_dict(snake_case , **snake_case )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
snake_case_ = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case , **snake_case )
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''blip-2'''
__SCREAMING_SNAKE_CASE : Dict = True
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case=32 , **snake_case ):
super().__init__(**snake_case )
if vision_config is None:
snake_case_ = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
snake_case_ = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
snake_case_ = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
snake_case_ = BlipaVisionConfig(**snake_case )
snake_case_ = BlipaQFormerConfig(**snake_case )
snake_case_ = text_config['model_type'] if 'model_type' in text_config else 'opt'
snake_case_ = CONFIG_MAPPING[text_model_type](**snake_case )
snake_case_ = self.text_config.tie_word_embeddings
snake_case_ = self.text_config.is_encoder_decoder
snake_case_ = num_query_tokens
snake_case_ = self.vision_config.hidden_size
snake_case_ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case_ = 1.0
snake_case_ = 0.02
@classmethod
def a ( cls , snake_case , snake_case , snake_case , **snake_case , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **snake_case , )
def a ( self ):
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.vision_config.to_dict()
snake_case_ = self.qformer_config.to_dict()
snake_case_ = self.text_config.to_dict()
snake_case_ = self.__class__.model_type
return output
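# A hypothetical composition sketch for the config above (a stand-in for
# transformers' Blip2Config), via the classmethod defined just before
# to_dict():
#
#   vision = Blip2VisionConfig()
#   qformer = Blip2QFormerConfig()
#   text = OPTConfig()
#   config = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text)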
| 108 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def __lowercase ( snake_case, snake_case=False, snake_case=False, snake_case=False ):
"""simple docstring"""
__magic_name__ :List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
__magic_name__ :int = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ :Tuple = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
__magic_name__ :Dict = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ :Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
__magic_name__ :Any = in_proj_bias[: config.hidden_size]
__magic_name__ :Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ :int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ :Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ :Dict = in_proj_bias[-config.hidden_size :]
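# The slicing above splits timm's fused attention projection: qkv.weight has
# shape (3 * hidden_size, hidden_size) and qkv.bias (3 * hidden_size,), with
# query, key and value stacked in that order along dim 0.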
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = dct.pop(snake_case )
__magic_name__ :Union[str, Any] = val
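# For instance, rename_key(state_dict, "transformer.norm.weight",
# "vilt.layernorm.weight") pops the tensor from the old key and re-inserts
# it under the new one, leaving all other entries untouched.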
@torch.no_grad()
def convert_vilt_checkpoint ( checkpoint_url, pytorch_dump_folder_path ):
    """Copy the weights of an original ViLT checkpoint into our ViLT structure and sanity-check the outputs."""
    config = ViltConfig(image_size=3_8_4, patch_size=3_2, tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3_1_2_9
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.idalabel = {0: '''False''', 1: '''True'''}
        config.labelaid = {v: k for k, v in config.idalabel.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('''Unknown model type''' )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''' )['''state_dict''']
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config )
    if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k, None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict, strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=3_8_4 )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    processor = ViltProcessor(image_processor, tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_a = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=True ).raw )
        image_b = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=True ).raw )
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_a = processor(image_a, text, return_tensors='''pt''' )
        encoding_b = processor(image_b, text, return_tensors='''pt''' )
        outputs = model(
            input_ids=encoding_a.input_ids, pixel_values=encoding_a.pixel_values, pixel_values_a=encoding_b.pixel_values, )  # second image of the NLVR2 pair
    else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''', stream=True ).raw )
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image, text, return_tensors='''pt''' )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 1_1, 3_0_5_2_2] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_1_2_9] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
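# Example invocation (hypothetical script/output names; the default URL converts the 200k MLM+ITM checkpoint):
#   python convert_vilt_original_to_pytorch.py --pytorch_dump_folder_path ./vilt-converted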
| 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ChineseCLIPImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Wrap an image processor and a tokenizer into a single ChineseCLIP processor."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize the text and/or preprocess the images; merge pixel values into the text encoding when both are given."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
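# Minimal usage sketch (the class above is ChineseCLIPProcessor in transformers; the checkpoint name is an
# OFA-Sys one and should be treated as an assumption):
#   processor = lowerCamelCase_.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")  # "一只猫" = "a cat"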
| 0 | 1 |
'''simple docstring'''
import torch
def main ( ):
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main() | 713 |
def reverse_long_words ( sentence ):
    # reverse every word longer than four characters, leave the rest untouched
    return " ".join(
        ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 641 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
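# Every test below round-trips a checkpoint across frameworks: the TF auto class loads PyTorch weights
# with `from_pt=True`, then the PyTorch auto class reloads the result with `from_tf=True`.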
@is_pt_tf_cross_test
class _a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: Any ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModel.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModelForPreTraining.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModelForCausalLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
| 43 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self : Union[str, Any] , vocab_file : str , monolingual_vocab_file : str , bos_token : str="<s>" , eos_token : str="</s>" , sep_token : str="</s>" , cls_token : str="<s>" , unk_token : str="<unk>" , pad_token : str="<pad>" , mask_token : str="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Tuple , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : Any ) -> List[Any]:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
        self.__dict__ = __lowerCAmelCase
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
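    # The special-token helpers below follow the BART/RoBERTa pair format: <s> A </s></s> B </s>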
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def snake_case_ ( self : Dict ) -> Optional[Any]:
_A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
return self.fairseq_ids_to_tokens[index]
def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
        out_string = ''''''.join(__lowerCAmelCase ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__lowerCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 2 | 0 |
"""simple docstring"""
def binomial_coefficient (n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
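# Pascal's-rule update over a single row: O(n * r) time and O(r) space; binomial_coefficient(n=10, r=5) == 252.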
| 709 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
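# CTRL is a conditional language model; besides the shared model tests, the integration test at the bottom
# greedy-decodes from a control-code prompt ("Legal the president is ...") and checks the generated ids.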
class CTRLModelTester :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=14 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_labels
__lowerCAmelCase = use_mc_token_ids
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = self.vocab_size - 1
    def prepare_config_and_inputs ( self ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config ( self ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model ( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> int:
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model ( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> List[str]:
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common ( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
    def create_and_check_for_sequence_classification ( self , config , input_ids , head_mask , token_type_ids , *args ) -> Dict:
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_snake_case = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_snake_case = (CTRLLMHeadModel,) if is_torch_available() else ()
_snake_case = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = False
_snake_case = False
    def A__ ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Optional[int]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def A__ ( self ) -> Optional[Any]:
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
def A__ ( self ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
def A__ ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self ) -> List[Any]:
pass
@slow
def A__ ( self ) -> Union[str, Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def A__ ( self ) -> Tuple:
pass
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def A__ ( self ) -> int:
        model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=torch_device ) # Legal the president is
        expected_output_ids = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 573 | 0 |
def solution ( n = 200_0000 ):
    # 0 marks a prime candidate, 1 marks a known composite
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
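# Sieve of Eratosthenes: O(n log log n) time, O(n) memory (Project Euler problem 10).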
| 413 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
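# `is_safetensors_compatible` should hold only when every `.bin` weight file in the pipeline has a
# matching `.safetensors` sibling for the same component (optionally under a variant such as "fp16").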
class _snake_case ( unittest.TestCase):
def A__ ( self : List[Any] ):
lowercase__ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowercase ) )
def A__ ( self : Any ):
lowercase__ = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowercase ) )
def A__ ( self : List[Any] ):
lowercase__ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowercase ) )
def A__ ( self : int ):
lowercase__ = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowercase ) )
def A__ ( self : Optional[int] ):
lowercase__ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__lowercase ) )
def A__ ( self : Optional[int] ):
lowercase__ = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : Optional[int] ):
lowercase__ = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : Optional[int] ):
# pass variant but use the non-variant filenames
lowercase__ = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : Union[str, Any] ):
lowercase__ = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase__ = "fp16"
self.assertFalse(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : int ):
lowercase__ = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : Optional[Any] ):
# pass variant but use the non-variant filenames
lowercase__ = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : List[Any] ):
lowercase__ = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
lowercase__ = "fp16"
self.assertFalse(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
| 413 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase : Dict = 16
lowerCAmelCase : int = 32
def a__ ( accelerator , batch_size = 16 , model_name = "bert-base-cased" ) -> int:
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def a__ ( accelerator , model , eval_dataloader , metric ) -> Union[str, Any]:
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions , references = accelerator.gather(
            (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
return eval_metric["accuracy"]
def a__ ( snake_case__ , snake_case__ ) -> List[str]:
# Initialize accelerator
lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase = config["""lr"""]
lowerCamelCase = int(config["""num_epochs"""] )
lowerCamelCase = int(config["""seed"""] )
lowerCamelCase = int(config["""batch_size"""] )
lowerCamelCase = args.model_name_or_path
set_seed(snake_case__ )
lowerCamelCase , lowerCamelCase = get_dataloaders(snake_case__ , snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCamelCase = 1
lowerCamelCase = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
lowerCamelCase = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase = 0
lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase = int(snake_case__ ) + 1
lowerCamelCase = evaluation_loop(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
accelerator.print("""resumed checkpoint performance:""" , snake_case__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , """r""" ) as f:
lowerCamelCase = json.load(snake_case__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase = {}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
lowerCamelCase = model(**snake_case__ )
lowerCamelCase = outputs.loss
lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase = F'epoch_{epoch}'
lowerCamelCase = os.path.join(args.output_dir , snake_case__ )
accelerator.save_state(snake_case__ )
lowerCamelCase = evaluation_loop(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCamelCase = accuracy
lowerCamelCase = lr_scheduler.get_lr()[0]
lowerCamelCase = optimizer.param_groups[0]["""lr"""]
lowerCamelCase = epoch
lowerCamelCase = overall_step
accelerator.print(F'epoch {epoch}:' , snake_case__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
def a__ ( ) -> List[Any]:
lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=snake_case__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case__ , )
parser.add_argument(
"""--output_dir""" , type=snake_case__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=snake_case__ , default=snake_case__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=snake_case__ , default=snake_case__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=snake_case__ , default=2 , help="""Number of train epochs.""" , )
lowerCamelCase = parser.parse_args()
lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
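# Hypothetical launch: `accelerate launch checkpointing.py --num_epochs 2 --output_dir ./ckpts`, then
# rerun with `--resume_from_checkpoint ./ckpts/epoch_0` to exercise the state-restore assertions above.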
| 533 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase : Optional[Any] = """sshleifer/bart-tiny-random"""
lowerCAmelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
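# Both teacher checkpoints above are tiny random models; the tests copy alternating teacher layers
# into an even smaller student and check the resulting encoder/decoder depths.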
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return AutoConfig.from_pretrained(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
        student , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
        student , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=_a )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowerCAmelCase ( self ):
"""simple docstring"""
        student , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaises(_a ):
create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=_a , d=_a )
| 533 | 1 |
from __future__ import annotations
def ceil_index (v , l , r , key ): # noqa: E741
    # binary search for the smallest index in v[l+1..r] whose value is >= key
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m # noqa: E741
    return r
def __SCREAMING_SNAKE_CASE (v ):
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
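# Patience-style O(n log n) LIS length, e.g. __SCREAMING_SNAKE_CASE([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6.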
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
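# CLIPSegProcessor pairs a CLIP BPE tokenizer with a ViT image processor; the tests below check
# save/load round-trips and that text, image, and visual-prompt inputs produce the expected keys.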
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_UpperCAmelCase = dict(zip(snake_case , range(len(snake_case ) ) ) )
_UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_UpperCAmelCase = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case ) )
_UpperCAmelCase = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case , snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> Union[str, Any]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> List[Any]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> Dict:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCAmelCase = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case )
_UpperCAmelCase = CLIPSegProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case )
self.assertIsInstance(processor_fast.tokenizer , snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case )
self.assertIsInstance(processor_fast.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_UpperCAmelCase = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
_UpperCAmelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(snake_case , return_tensors='np' )
_UpperCAmelCase = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = processor(text=snake_case )
_UpperCAmelCase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(images=snake_case , visual_prompt=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(snake_case )
_UpperCAmelCase = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
| 573 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Compute all pairwise squared Euclidean distances between rows of `a` and rows of `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Assign every RGB pixel in `x` to the index of its nearest cluster color."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
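# A small illustration (not part of the original file) of the two helpers above.
# squared_euclidean_distance uses the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2,
# computed for all pairs at once; color_quantize then maps each RGB pixel to the
# index of its nearest cluster. Hypothetical values, for demonstration only:
#
#   clusters = np.array([[0, 0, 0], [255, 255, 255]])      # black and white
#   pixels = np.array([[[10, 10, 10], [250, 240, 245]]])   # one 1x2 RGB image
#   color_quantize(pixels, clusters)                        # -> array([0, 1])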
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # calls the module-level `resize` transform
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(self, image, data_format=None):
        # rescale from [0, 255] to [0, 2], then shift to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 705 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
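# A minimal usage sketch (illustrative, not part of the original file):
# the default configuration uses a plain ViT backbone, while is_hybrid=True
# builds a default BiT backbone config as shown in __init__ above.
#
#   config = DPTConfig()                 # plain ViT backbone, readout_type="project"
#   assert config.to_dict()["model_type"] == "dpt"
#   hybrid = DPTConfig(is_hybrid=True)   # attaches a default BitConfig backbone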
| 495 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 375 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 662 | 0 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list): [min, max] range to sample the target short edge from.
            max_size (int): maximum allowed length of the longest edge.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
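# Worked example of the scaling math above (comments only, not in the original
# file): for an HxW = 480x640 image and a sampled size of 256,
# scale = 256 / min(480, 640) = 0.5333..., giving newh = 256 and
# neww = int(0.5333 * 640 + 0.5) = 341. If max(newh, neww) exceeded
# self.max_size, both sides would be scaled down again so the longer edge
# equals max_size.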
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
def __call__( self : Tuple , _A : List[str] , _A : str=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_A , _A ):
UpperCAmelCase__ : Union[str, Any] = [images]
if single_image:
assert len(_A ) == 1
for i in range(len(_A ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_A , images.pop(_A ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_A , torch.as_tensor(img_tensorize(images.pop(_A ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase__ : str = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase__ : List[Any] = self.aug(_A )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase__ : Optional[Any] = [self.normalizer(_A ) for x in images]
# now pad them to do the following operations
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.pad(_A )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCAmelCase__ : int = torch.true_divide(_A , _A )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 312 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 312 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
__lowerCamelCase : Dict = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 385 |
# fmt: off
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
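# Round-trip illustration (not part of the original file): encrypt and decrypt
# are inverses for any message made of supported characters.
#
#   >>> encrypt("SOS")
#   '... --- ...'
#   >>> decrypt("... --- ...")
#   'SOS'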
if __name__ == "__main__":
main()
| 385 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 302 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    """Dynamic-programming matrix-chain ordering: returns the cost table and the
    split table, using m[a][b] = min over c of m[a][c] + m[c+1][b] + p[a-1]*p[c]*p[b]."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
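# Worked example for the recurrence above (illustrative, not in the original
# file): for dimensions array = [10, 20, 5, 30], i.e. A1 is 10x20, A2 is 20x5
# and A3 is 5x30:
#   (A1 A2) A3 costs 10*20*5 + 10*5*30 = 1000 + 1500 = 2500 multiplications
#   A1 (A2 A3) costs 20*5*30 + 10*20*30 = 3000 + 6000 = 9000 multiplications
# so matrix_chain_order([10, 20, 5, 30]) reports a minimum of 2500, with the
# split placed after A2.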
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
| 302 | 1 |
def z_function(input_str: str) -> list[int]:
    """For each index i > 0, z_result[i] is the length of the longest substring
    starting at i that is also a prefix of input_str."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if the match at position i can be extended by one more character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
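# Quick check (illustrative, not part of the original file): occurrences of
# "aba" in "abacaba" start at indices 0 and 4, so:
#
#   >>> find_pattern("aba", "abacaba")
#   2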
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number in `data_file` whose base,exponent pair
    yields the greatest value of base**exponent (Project Euler 99)."""
    largest: float = 0
    result = 0

    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result
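# Why the logarithm works (illustrative note, not in the original file):
# comparing a**x with c**y directly can be enormous for large exponents, but
# a**x < c**y  <=>  x*log10(a) < y*log10(c). For example, 2**11 = 2048 and
# 3**7 = 2187: 11*log10(2) ~= 3.311 < 7*log10(3) ~= 3.340, so 3**7 is larger.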
if __name__ == "__main__":
print(solution())
| 302 | 0 |
"""simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 463 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()

    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
| 463 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0

CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer (i.e. a character splitter)."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Converts a string into a sequence of characters (tokens)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to write out.
        return ()
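# A minimal usage sketch (not part of the original module, shown for
# illustration): CANINE tokenizes at the character level, so input ids are just
# Unicode codepoints wrapped in the [CLS]/[SEP] pseudo-characters.
#
#   >>> tokenizer = CanineTokenizer()
#   >>> tokenizer("hi")["input_ids"]
#   [57344, 104, 105, 57345]   # [CLS]=0xE000, ord("h"), ord("i"), [SEP]=0xE001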
| 682 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=ReturnType.TEXT , UpperCAmelCase__ : str=False ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
__SCREAMING_SNAKE_CASE = {
F"""{self.return_name}_text""": self.tokenizer.decode(
UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , )
}
records.append(UpperCAmelCase__ )
return records
@add_end_docstrings(UpperCamelCase)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : str = "summary"
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(UpperCamelCase)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : str = "translation"
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
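# A quick smoke test of the summarization pipeline above (a sketch: the `pipeline`
# factory and the "t5-small" checkpoint are illustrative choices, not requirements
# of this code, and downloading the model needs network access).
if __name__ == "__main__":
    from transformers import pipeline

    summarizer = pipeline("summarization", model="t5-small")
    text = "The tower is 324 metres tall, about the same height as an 81-storey building."
    print(summarizer(text, min_length=5, max_length=20)[0]["summary_text"])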
| 682 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM-head key in a DialoGPT checkpoint and save it in HF format."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
_A : Tuple = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
_A : List[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_A : Tuple = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
_A : List[str] = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
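    # The conversion above boils down to a single key rename; the same pattern on a
    # toy state dict (shapes are arbitrary):
    toy = {OLD_KEY: torch.zeros(3, 3)}
    toy[NEW_KEY] = toy.pop(OLD_KEY)  # same tensor, new key
    assert OLD_KEY not in toy and NEW_KEY in toy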
| 712 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
# update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_slice = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4)
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f"nielsr/{model_name}" )
image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A : Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
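    # The heart of read_in_swin_q_k_v / read_in_decoder_q_k_v is slicing one fused
    # in_proj matrix into query/key/value blocks; a toy check of that layout:
    _hidden = 4
    _in_proj = torch.randn(3 * _hidden, _hidden)
    _q, _k, _v = _in_proj[:_hidden, :], _in_proj[_hidden : 2 * _hidden, :], _in_proj[-_hidden:, :]
    assert torch.equal(torch.cat([_q, _k, _v]), _in_proj)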
| 130 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""simple docstring"""
__lowerCAmelCase = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class ModelArguments:
"""simple docstring"""
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Train language if it is different from the evaluation language."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__lowerCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli', model_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_A = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_A = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_A = train_dataset.features['label'].names
if training_args.do_eval:
_A = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_A = eval_dataset.features['label'].names
if training_args.do_predict:
_A = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_A = predict_dataset.features['label'].names
# Labels
_A = len(__snake_case )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , idalabel={str(__snake_case ): label for i, label in enumerate(__snake_case )} , labelaid={label: i for i, label in enumerate(__snake_case )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_A = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_A = False
def preprocess_function(__snake_case : List[Any] ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=__snake_case , max_length=data_args.max_seq_length , truncation=__snake_case , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_A = min(len(__snake_case ) , data_args.max_train_samples )
_A = train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_A = train_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_A = min(len(__snake_case ) , data_args.max_eval_samples )
_A = eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_A = eval_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_A = min(len(__snake_case ) , data_args.max_predict_samples )
_A = predict_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
_A = predict_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
    metric = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_A = default_data_collator
elif training_args.fpaa:
_A = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 )
else:
_A = None
# Initialize our Trainer
_A = Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=__snake_case )
_A = train_result.metrics
_A = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_A = min(__snake_case , len(__snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __snake_case )
trainer.save_metrics('train' , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_A = trainer.evaluate(eval_dataset=__snake_case )
_A = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
_A = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('eval' , __snake_case )
trainer.save_metrics('eval' , __snake_case )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
_A , _A , _A = trainer.predict(__snake_case , metric_key_prefix='predict' )
_A = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__snake_case )
)
_A = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('predict' , __snake_case )
trainer.save_metrics('predict' , __snake_case )
_A = np.argmax(__snake_case , axis=1 )
_A = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(__snake_case , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(__snake_case ):
_A = label_list[item]
writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
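def _demo_compute_metrics():
    # Stand-alone version of the compute_metrics hook above, on dummy logits
    # (a sketch; reuses the numpy/evaluate imports at the top of this file).
    metric = evaluate.load("xnli")  # accuracy over 3-way NLI labels
    logits = np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])
    preds = np.argmax(logits, axis=1)
    return metric.compute(predictions=preds, references=[1, 0])  # {'accuracy': 1.0}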
| 107 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
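    # Example: ties between equally long subsequences may resolve either way, e.g.
    # this call returns one non-decreasing subsequence of length 6, such as
    # [10, 22, 33, 41, 60, 80].
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))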
| 15 | 0 |
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, then Euler's product formula for phi.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
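    # Small-limit check: sum(phi(d) for d = 2..8) = 1+2+2+4+2+6+4 = 21.
    print(solution(8))  # expected: 21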
| 214 |
def check_bouncy(n: int) -> bool:
    """Return True if the digits of n are neither sorted ascending nor descending."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
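    # Known anchor points for this proportion: it first reaches exactly 50% at 538
    # and 90% at 21780.
    print(solution(50))  # 538
    print(solution(90))  # 21780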
| 214 | 1 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    k_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    if split_mlp_wi:
        wi_0 = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
# Encoder.
for i in range(__UpperCamelCase ):
# Block i, layer 0 (Self Attention).
snake_case__ = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_attention_layer_norm""" )
snake_case__ , snake_case__ , snake_case__ , snake_case__ = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """attention""" )
snake_case__ = layer_norm
snake_case__ = k.T
snake_case__ = o.T
snake_case__ = q.T
snake_case__ = v.T
# Block i, layer 1 (MLP).
snake_case__ = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_mlp_layer_norm""" )
snake_case__ , snake_case__ = tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , __UpperCamelCase )
snake_case__ = layer_norm
if split_mlp_wi:
snake_case__ = wi[0].T
snake_case__ = wi[1].T
else:
snake_case__ = wi.T
snake_case__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ = tax_relpos_bias_lookup(
__UpperCamelCase , __UpperCamelCase , """encoder""" ).T
snake_case__ = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
snake_case__ = tax_relpos_bias_lookup(
__UpperCamelCase , 0 , """encoder""" ).T
snake_case__ = tax_relpos_bias_lookup(
__UpperCamelCase , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(__UpperCamelCase ):
# Block i, layer 0 (Self Attention).
snake_case__ = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_self_attention_layer_norm""" )
snake_case__ , snake_case__ , snake_case__ , snake_case__ = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """self_attention""" )
snake_case__ = layer_norm
snake_case__ = k.T
snake_case__ = o.T
snake_case__ = q.T
snake_case__ = v.T
# Block i, layer 1 (Cross Attention).
snake_case__ = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
snake_case__ , snake_case__ , snake_case__ , snake_case__ = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """encoder_decoder_attention""" )
snake_case__ = layer_norm
snake_case__ = k.T
snake_case__ = o.T
snake_case__ = q.T
snake_case__ = v.T
# Block i, layer 2 (MLP).
snake_case__ = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_mlp_layer_norm""" )
snake_case__ , snake_case__ = tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , __UpperCamelCase )
snake_case__ = layer_norm
if split_mlp_wi:
snake_case__ = wi[0].T
snake_case__ = wi[1].T
else:
snake_case__ = wi.T
snake_case__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ = tax_relpos_bias_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" ).T
snake_case__ = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case__ = old["""decoder/logits_dense/kernel"""].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    config = MTaConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
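    # Sanity check of the head-merging reshape used in tax_attention_lookup:
    # a (d_model, n_heads, d_head) kernel flattens to (d_model, n_heads * d_head).
    _k_tmp = np.zeros((8, 2, 3))
    _k = _k_tmp.reshape(_k_tmp.shape[0], _k_tmp.shape[1] * _k_tmp.shape[2])
    assert _k.shape == (8, 6)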
| 214 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase : Union[str, Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> int:
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
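        # The final check above is a plain max-abs-deviation test; the same idea in
        # isolation (the tolerance mirrors the one used here):
        assert np.abs((expected_slice + 1e-3) - expected_slice).max() < 1e-2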
| 214 | 1 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
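    # The sieve helper on its own:
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]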
| 516 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
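    # Stand-alone use of the tree, independent of the exhaustive test above:
    rmq = SegmentTree([5, 2, 8, 1], min)
    assert rmq.query(0, 2) == 2  # min over indices 0..2
    rmq.update(3, 9)
    assert rmq.query(2, 3) == 8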
| 516 | 1 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _lowercase ( lowerCAmelCase ):
'''simple docstring'''
    feature_extraction_class = None
    feat_extract_tester = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, '''feature_size'''))
        self.assertTrue(hasattr(feat_extract, '''sampling_rate'''))
        self.assertTrue(hasattr(feat_extract, '''padding_value'''))
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Optional[Any] = feat_extract.model_input_names[0]
UpperCAmelCase__ : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__a ) == len(__a ) for x, y in zip(__a ,processed_features[input_name] ) ) )
UpperCAmelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__a )
UpperCAmelCase__ : int = BatchFeature({input_name: speech_inputs} ,tensor_type='''np''' )
UpperCAmelCase__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__a )
UpperCAmelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Any = feat_extract.model_input_names[0]
UpperCAmelCase__ : List[str] = BatchFeature({input_name: speech_inputs} ,tensor_type='''pt''' )
UpperCAmelCase__ : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ : List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__a )
UpperCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Optional[int] = feat_extract.model_input_names[0]
UpperCAmelCase__ : List[Any] = BatchFeature({input_name: speech_inputs} ,tensor_type='''tf''' )
UpperCAmelCase__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_a, input_b):
            if len(input_a) != len(input_b):
                return False
            for input_slice_a, input_slice_b in zip(input_a, input_b):
                if not np.allclose(np.asarray(input_slice_a), np.asarray(input_slice_b), atol=1e-3):
                    return False
            return True
UpperCAmelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(numpify=__a )
UpperCAmelCase__ : Dict = feat_extract.model_input_names[0]
UpperCAmelCase__ : Dict = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : List[Any] = self.feat_extract_tester.seq_length_diff
UpperCAmelCase__ : Tuple = self.feat_extract_tester.max_seq_length + pad_diff
UpperCAmelCase__ : List[str] = self.feat_extract_tester.min_seq_length
UpperCAmelCase__ : Dict = self.feat_extract_tester.batch_size
UpperCAmelCase__ : List[Any] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCAmelCase__ : Tuple = feat_extract.pad(__a ,padding=__a )
UpperCAmelCase__ : List[Any] = input_a[input_name]
UpperCAmelCase__ : Dict = feat_extract.pad(__a ,padding='''longest''' )
UpperCAmelCase__ : List[str] = input_a[input_name]
UpperCAmelCase__ : str = feat_extract.pad(__a ,padding='''max_length''' ,max_length=len(speech_inputs[-1] ) )
UpperCAmelCase__ : Optional[Any] = input_a[input_name]
UpperCAmelCase__ : Union[str, Any] = feat_extract.pad(__a ,padding='''longest''' ,return_tensors='''np''' )
UpperCAmelCase__ : List[str] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__a ):
feat_extract.pad(__a ,padding='''max_length''' )[input_name]
UpperCAmelCase__ : List[str] = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=__a ,return_tensors='''np''' )
UpperCAmelCase__ : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_are_equal(__a ,__a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ : List[Any] = feat_extract.pad(__a ,pad_to_multiple_of=10 )
UpperCAmelCase__ : List[str] = input_a[input_name]
UpperCAmelCase__ : Optional[int] = feat_extract.pad(__a ,padding='''longest''' ,pad_to_multiple_of=10 )
UpperCAmelCase__ : Tuple = input_a[input_name]
UpperCAmelCase__ : Optional[Any] = feat_extract.pad(
__a ,padding='''max_length''' ,pad_to_multiple_of=10 ,max_length=__a )
UpperCAmelCase__ : List[str] = input_a[input_name]
UpperCAmelCase__ : int = feat_extract.pad(
__a ,padding='''max_length''' ,pad_to_multiple_of=10 ,max_length=__a ,return_tensors='''np''' ,)
UpperCAmelCase__ : Union[str, Any] = input_a[input_name]
self.assertTrue(all(len(__a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__a ,__a ) )
UpperCAmelCase__ : Union[str, Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCAmelCase__ : Union[str, Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def lowerCAmelCase__ ( self ,lowerCamelCase_=False ) -> List[Any]:
'''simple docstring'''
def _inputs_have_equal_length(lowerCamelCase_ ):
UpperCAmelCase__ : Dict = len(input[0] )
for input_slice in input[1:]:
if len(__a ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase_ ,lowerCamelCase_ ):
if len(__a ) != len(__a ):
return False
for input_slice_a, input_slice_a in zip(__a ,__a ):
if not np.allclose(np.asarray(__a ) ,np.asarray(__a ) ,atol=1e-3 ):
return False
return True
UpperCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=__a )
UpperCAmelCase__ : int = feat_extract.model_input_names[0]
UpperCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCAmelCase__ : List[str] = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[0] ) ,truncation=__a )
UpperCAmelCase__ : List[Any] = input_a[input_name]
UpperCAmelCase__ : str = feat_extract.pad(__a ,padding='''max_length''' ,max_length=len(speech_inputs[0] ) )
UpperCAmelCase__ : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertFalse(_inputs_have_equal_length(__a ) )
# truncate to smallest with np
UpperCAmelCase__ : Dict = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[0] ) ,return_tensors='''np''' ,truncation=__a ,)
UpperCAmelCase__ : Tuple = input_a[input_name]
UpperCAmelCase__ : Union[str, Any] = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[0] ) ,return_tensors='''np''' )
UpperCAmelCase__ : Optional[Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces the padded length to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(__a ) )
# truncate to middle
UpperCAmelCase__ : int = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[1] ) ,truncation=__a ,return_tensors='''np''' ,)
UpperCAmelCase__ : Dict = input_a[input_name]
UpperCAmelCase__ : Optional[int] = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[1] ) ,truncation=__a )
UpperCAmelCase__ : str = input_a[input_name]
UpperCAmelCase__ : List[str] = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[1] ) ,return_tensors='''np''' )
UpperCAmelCase__ : Union[str, Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_are_equal(__a ,__a ) )
        # since truncation forces the padded length to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(__a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__a ):
feat_extract.pad(__a ,truncation=__a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__a ):
feat_extract.pad(__a ,padding='''longest''' ,truncation=__a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__a ):
feat_extract.pad(__a ,padding='''longest''' ,truncation=__a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__a ):
feat_extract.pad(__a ,padding='''max_length''' ,truncation=__a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ : str = 12
UpperCAmelCase__ : Any = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=__a ,truncation=__a ,)
UpperCAmelCase__ : Tuple = input_a[input_name]
UpperCAmelCase__ : str = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=__a ,)
UpperCAmelCase__ : Optional[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCAmelCase__ : str = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCAmelCase__ : Any = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertFalse(_inputs_have_equal_length(__a ) )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
self._check_padding(numpify=__a )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self._check_padding(numpify=__a )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
self._check_truncation(numpify=__a )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
self._check_truncation(numpify=__a )
@require_torch
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ : Any = feat_extract.model_input_names[0]
UpperCAmelCase__ : List[Any] = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : int = feat_extract.pad(__a ,padding='''longest''' ,return_tensors='''np''' )[input_name]
UpperCAmelCase__ : str = feat_extract.pad(__a ,padding='''longest''' ,return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ : Tuple = feat_extract.model_input_names[0]
UpperCAmelCase__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : Any = feat_extract.pad(__a ,padding='''longest''' ,return_tensors='''np''' )[input_name]
UpperCAmelCase__ : Union[str, Any] = feat_extract.pad(__a ,padding='''longest''' ,return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.feat_extract_dict
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**__a )
UpperCAmelCase__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ : Dict = [len(__a ) for x in speech_inputs]
UpperCAmelCase__ : Optional[Any] = feat_extract.model_input_names[0]
UpperCAmelCase__ : int = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : Optional[Any] = feat_extract.pad(__a ,padding='''longest''' ,return_tensors='''np''' )
self.assertIn('''attention_mask''' ,__a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,__a )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.feat_extract_dict
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = self.feature_extraction_class(**__a )
UpperCAmelCase__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ : Optional[int] = [len(__a ) for x in speech_inputs]
UpperCAmelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
UpperCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : List[str] = min(__a )
UpperCAmelCase__ : Optional[Any] = feat_extract.pad(
__a ,padding='''max_length''' ,max_length=__a ,truncation=__a ,return_tensors='''np''' )
self.assertIn('''attention_mask''' ,__a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
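# A minimal, hedged sketch (not the transformers implementation; the helper
# name and the treatment of padding=True are assumptions) of how the padding
# options exercised by the tests above resolve to a single target length:
def resolve_target_length(lengths, padding, max_length=None, pad_to_multiple_of=None):
    if padding == "max_length":
        if max_length is None:
            # mirrors the assertRaises checks above
            raise ValueError("max_length must be provided when padding='max_length'")
        target = max_length
    else:  # "longest" (padding=True is treated the same way in the HF API)
        target = max(lengths)
    if pad_to_multiple_of is not None and target % pad_to_multiple_of != 0:
        # the same round-up the tests assert: (target // m + 1) * m
        target = (target // pad_to_multiple_of + 1) * pad_to_multiple_of
    return target

assert resolve_target_length([4, 7, 9], "longest") == 9
assert resolve_target_length([4, 7, 9], "longest", pad_to_multiple_of=10) == 10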
| 614 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
def UpperCAmelCase ( self , **__a) -> int:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a , use_karras_sigmas=__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
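# A hedged usage sketch distilled from the loops above: the generic scheduler
# sampling pattern. The config mirrors get_scheduler_config; `unet` is a dummy
# stand-in for a trained noise-prediction model, not a real pipeline component.
import torch
from diffusers import DPMSolverSDEScheduler

def unet(x, t):
    return torch.zeros_like(x)  # placeholder; a real pipeline calls a trained UNet

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02,
    beta_schedule="linear", noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    sample = scheduler.step(unet(model_input, t), t, sample).prev_sample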
| 19 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = "google/mobilebert-uncased"
def UpperCamelCase ( self : int)-> Dict:
super().setUp()
__lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
__lowerCAmelCase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self : Optional[Any] , snake_case_ : Optional[Any])-> Any:
__lowerCAmelCase = """UNwant\u00E9d,running"""
__lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self : Dict)-> List[Any]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file)
__lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""")
self.assertListEqual(snake_case_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_) , [9, 6, 7, 12, 10, 11])
def UpperCamelCase ( self : str)-> Optional[int]:
if not self.test_rust_tokenizer:
return
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """UNwant\u00E9d,running"""
__lowerCAmelCase = tokenizer.tokenize(snake_case_)
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_)
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_)
__lowerCAmelCase = rust_tokenizer.encode(snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
# With lower casing
__lowerCAmelCase = self.get_tokenizer(do_lower_case=snake_case_)
__lowerCAmelCase = self.get_rust_tokenizer(do_lower_case=snake_case_)
__lowerCAmelCase = """UNwant\u00E9d,running"""
__lowerCAmelCase = tokenizer.tokenize(snake_case_)
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_)
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_)
__lowerCAmelCase = rust_tokenizer.encode(snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
def UpperCamelCase ( self : Optional[int])-> int:
__lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def UpperCamelCase ( self : Tuple)-> int:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def UpperCamelCase ( self : Tuple)-> Optional[Any]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def UpperCamelCase ( self : Any)-> Union[str, Any]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def UpperCamelCase ( self : Union[str, Any])-> str:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def UpperCamelCase ( self : List[str])-> int:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def UpperCamelCase ( self : Union[str, Any])-> Optional[int]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def UpperCamelCase ( self : List[str])-> Optional[Any]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def UpperCamelCase ( self : str)-> List[str]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=snake_case_ , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def UpperCamelCase ( self : str)-> Optional[int]:
__lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowerCAmelCase = {}
for i, token in enumerate(snake_case_):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=snake_case_ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def UpperCamelCase ( self : Optional[Any])-> Any:
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def UpperCamelCase ( self : Optional[Any])-> List[str]:
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def UpperCamelCase ( self : List[str])-> List[str]:
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def UpperCamelCase ( self : Dict)-> List[str]:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(snake_case_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
self.assertListEqual(
[rust_tokenizer.tokenize(snake_case_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
@slow
def UpperCamelCase ( self : Dict)-> Union[str, Any]:
__lowerCAmelCase = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""")
__lowerCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=snake_case_)
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=snake_case_)
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_)
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_)
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self : Tuple)-> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_)
__lowerCAmelCase = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(
snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , return_offsets_mapping=snake_case_ , add_special_tokens=snake_case_ , )
__lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(snake_case_ , """do_lower_case""") else False
__lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def UpperCamelCase ( self : Union[str, Any])-> Tuple:
__lowerCAmelCase = ["""的""", """人""", """有"""]
__lowerCAmelCase = """""".join(snake_case_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_)
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_)
__lowerCAmelCase = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_)
__lowerCAmelCase = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_)
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(snake_case_)
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(snake_case_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case_ , snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
__lowerCAmelCase = False
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_)
__lowerCAmelCase = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_)
__lowerCAmelCase = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_)
__lowerCAmelCase = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_)
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(snake_case_)
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(snake_case_)
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(snake_case_)
]
self.assertListEqual(snake_case_ , snake_case_)
self.assertListEqual(snake_case_ , snake_case_)
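# A simplified sketch (not the transformers implementation) of the greedy
# longest-match WordPiece algorithm the tests above exercise: continuation
# pieces carry a "##" prefix, and a word with no full decomposition maps to
# the unknown token as a whole.
def wordpiece_tokenize(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur_piece = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:
            return [unk_token]  # whole word is unknown, matching "unwantedX" above
        tokens.append(cur_piece)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", vocab) == ["[UNK]"]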
| 705 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
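# A few illustrative checks of the matcher above:
assert is_balanced("([]{})") is True
assert is_balanced("([)]") is False  # interleaved pairs are rejected
assert is_balanced("(((") is False   # unmatched opens are rejected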
| 456 | 0 |
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Iterate the power tower base ** base ** ... (height levels), keeping
    only the last `digits` digits at every step."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 565 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with the `datasets` package and write it
    out as <split>.source / <split>.target line files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # save to val.source / val.target, like the summarization datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # the reader is the bottleneck, so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
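# Equivalent invocations of the downloader above (values illustrative; the
# script filename is assumed):
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16
# or directly from Python:
#   download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16")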
| 565 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] ="""git_vision_model"""
def __init__( self : Optional[int] , UpperCAmelCase__ : Dict=7_6_8 , UpperCAmelCase__ : int=3_0_7_2 , UpperCAmelCase__ : List[str]=1_2 , UpperCAmelCase__ : List[Any]=1_2 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Dict=2_2_4 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : List[str]="quick_gelu" , UpperCAmelCase__ : Union[str, Any]=1e-5 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Union[str, Any]=0.02 , **UpperCAmelCase__ : Tuple , ) ->Dict:
"""simple docstring"""
super().__init__(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = attention_dropout
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = hidden_act
@classmethod
def _lowercase ( cls : Tuple , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Any ) ->"PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE : Optional[int] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] ="""git"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : str=3_0_5_2_2 , UpperCAmelCase__ : Optional[int]=7_6_8 , UpperCAmelCase__ : Union[str, Any]=6 , UpperCAmelCase__ : List[str]=1_2 , UpperCAmelCase__ : Tuple=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : int=1_0_2_4 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Tuple=1e-12 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : str="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Optional[int]=1_0_1 , UpperCAmelCase__ : Optional[Any]=1_0_2 , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : List[Any] , ) ->List[Any]:
"""simple docstring"""
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE : Dict = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE : int = GitVisionConfig(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = position_embedding_type
SCREAMING_SNAKE_CASE : int = use_cache
SCREAMING_SNAKE_CASE : str = tie_word_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = num_image_with_embedding
SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE : List[str] = eos_token_id
def _lowercase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : str = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : str = self.__class__.model_type
return output
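# Hedged usage sketch. GitVisionConfig is referenced by name in the code
# above; "GitConfig" is assumed to be the upstream name of the second class
# (model_type "git"), so this is shown as comments rather than live code:
#   vision = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
#   config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
#   config.vision_config.hidden_size                  # -> 768
#   config.to_dict()["vision_config"]["hidden_size"]  # -> 768, via to_dict() above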
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : str = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
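# The block above defers the heavy tokenizer imports until an attribute is
# first accessed. A minimal sketch of the same idea using PEP 562 module
# __getattr__ (not the transformers _LazyModule implementation; the mapping
# below is illustrative):
import importlib

_LAZY_ATTRS = {"LayoutXLMProcessor": ".processing_layoutxlm"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")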
| 446 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Dict =logging.get_logger(__name__)
lowerCAmelCase__ : List[str] ={
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = '''camembert'''
def __init__( self , _A=30_522 , _A=768 , _A=12 , _A=12 , _A=3_072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1e-12 , _A=1 , _A=0 , _A=2 , _A="absolute" , _A=True , _A=None , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
@property
def _A ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
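# These axis maps let the ONNX exporter mark the batch and sequence dimensions
# as dynamic. A hedged sketch of how such a map is typically consumed (generic
# torch.onnx usage, not the transformers export pipeline):
#   torch.onnx.export(model, (input_ids, attention_mask), "camembert.onnx",
#                     input_names=["input_ids", "attention_mask"],
#                     dynamic_axes={"input_ids": {0: "batch", 1: "sequence"},
#                                   "attention_mask": {0: "batch", 1: "sequence"}})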
| 148 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=UpperCamelCase__ )
__lowerCamelCase = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=UpperCamelCase__ )
env_command_parser(subparsers=UpperCamelCase__ )
launch_command_parser(subparsers=UpperCamelCase__ )
tpu_command_parser(subparsers=UpperCamelCase__ )
test_command_parser(subparsers=UpperCamelCase__ )
# Let's go
__lowerCamelCase = parser.parse_args()
if not hasattr(UpperCamelCase__ , 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 469 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class a_ ( lowerCamelCase ):
lowercase = 42
lowercase = 42
class a_ ( nn.Module ):
lowercase = 42
lowercase = (16, 32, 96, 2_56)
lowercase = jnp.floataa
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCamelCase = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCamelCase = self.block_out_channels[i]
UpperCamelCase = self.block_out_channels[i + 1]
UpperCamelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = blocks
UpperCamelCase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.conv_in(_SCREAMING_SNAKE_CASE )
UpperCamelCase = nn.silu(_SCREAMING_SNAKE_CASE )
for block in self.blocks:
UpperCamelCase = block(_SCREAMING_SNAKE_CASE )
UpperCamelCase = nn.silu(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.conv_out(_SCREAMING_SNAKE_CASE )
return embedding
@flax_register_to_config
class a_ ( nn.Module , lowerCamelCase , lowerCamelCase ):
lowercase = 32
lowercase = 4
lowercase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase = False
lowercase = (3_20, 6_40, 12_80, 12_80)
lowercase = 2
lowercase = 8
lowercase = None
lowercase = 12_80
lowercase = 0.0
lowercase = False
lowercase = jnp.floataa
lowercase = True
lowercase = 0
lowercase = "rgb"
lowercase = (16, 32, 96, 2_56)
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> FrozenDict:
"""simple docstring"""
UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCamelCase = jnp.zeros(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCamelCase = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCamelCase = jnp.zeros(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
UpperCamelCase ,UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["params"]
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.block_out_channels
UpperCamelCase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCamelCase = FlaxTimestepEmbedding(_SCREAMING_SNAKE_CASE , dtype=self.dtype )
UpperCamelCase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
UpperCamelCase = self.only_cross_attention
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = block_out_channels[0]
UpperCamelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_SCREAMING_SNAKE_CASE )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCamelCase = output_channel
UpperCamelCase = block_out_channels[i]
UpperCamelCase = i == len(_SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
UpperCamelCase = FlaxDownBlockaD(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_SCREAMING_SNAKE_CASE )
for _ in range(self.layers_per_block ):
UpperCamelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_SCREAMING_SNAKE_CASE )
if not is_final_block:
UpperCamelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = down_blocks
UpperCamelCase = controlnet_down_blocks
# mid
UpperCamelCase = block_out_channels[-1]
UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
UpperCamelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
UpperCamelCase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCamelCase = jnp.flip(_SCREAMING_SNAKE_CASE , axis=1 )
# 1. time
if not isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ):
UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
UpperCamelCase = jnp.expand_dims(_SCREAMING_SNAKE_CASE , 0 )
UpperCamelCase = self.time_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.time_embedding(_SCREAMING_SNAKE_CASE )
# 2. pre-process
UpperCamelCase = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
UpperCamelCase = self.conv_in(_SCREAMING_SNAKE_CASE )
UpperCamelCase = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
UpperCamelCase = self.controlnet_cond_embedding(_SCREAMING_SNAKE_CASE )
sample += controlnet_cond
# 3. down
UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase ,UpperCamelCase = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train )
else:
UpperCamelCase ,UpperCamelCase = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCamelCase = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train )
        # 5. controlnet blocks
UpperCamelCase = ()
for down_block_res_sample, controlnet_block in zip(_SCREAMING_SNAKE_CASE , self.controlnet_down_blocks ):
UpperCamelCase = controlnet_block(_SCREAMING_SNAKE_CASE )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCamelCase = controlnet_down_block_res_samples
UpperCamelCase = self.controlnet_mid_block(_SCREAMING_SNAKE_CASE )
# 6. scaling
UpperCamelCase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_SCREAMING_SNAKE_CASE , mid_block_res_sample=_SCREAMING_SNAKE_CASE )
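# The 1x1 controlnet blocks above use zeros for both kernel and bias init, so
# a freshly initialized ControlNet contributes nothing to the UNet residuals.
# A small self-contained check of that property (illustrative shapes):
import jax
import jax.numpy as jnp
import flax.linen as nn

conv = nn.Conv(4, kernel_size=(1, 1), padding="VALID",
               kernel_init=nn.initializers.zeros_init(),
               bias_init=nn.initializers.zeros_init())
x = jnp.ones((1, 8, 8, 4))  # NHWC layout, as in the model above
params = conv.init(jax.random.PRNGKey(0), x)
print(jnp.all(conv.apply(params, x) == 0))  # True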
| 35 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Score a file of predictions against a file of targets; extra kwargs are
    forwarded to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 35 | 1 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a base-10 integer to an octal string, e.g. 216 -> "0o330"."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # place the next octal digit at position `counter`
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # integer-divide by 8, discarding any remainder
    # int() strips the trailing ".0" that the float arithmetic leaves on `octal`
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
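# Sanity check against the standard library: the built-in oct() produces the
# same "0o"-prefixed string.
assert decimal_to_octal(216) == oct(216) == "0o330"
assert decimal_to_octal(512) == oct(512) == "0o1000"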
| 364 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
__lowercase = mock.Mock()
__lowercase = 500
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Download this model to make sure it's in the cache.
__lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
__lowercase = mock.Mock()
__lowercase = 500
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Download this model to make sure it's in the cache.
__lowercase = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__lowercase = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
__lowercase = tempfile.mktemp()
with open(_lowerCamelCase , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , _lowerCamelCase )
__lowercase = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , _lowerCamelCase )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
__lowercase = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
'''simple docstring'''
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> List[Any]:
'''simple docstring'''
__lowercase = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id="test-tokenizer" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-tokenizer-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__lowercase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__lowercase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
__lowercase = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class TrieTest(unittest.TestCase):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__lowercase = Trie()
__lowercase = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(_lowerCamelCase , ["AB", "C"] )
| 118 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
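# A minimal sketch of a concrete command built on the ABC above (the command name
# and behavior are hypothetical, for illustration only):
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is expected to be the sub-parsers action of the root CLI parser.
        hello_parser = parser.add_parser("hello", help="print a greeting")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the CLI")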
| 704 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
"""simple docstring"""
def __init__( self : str , snake_case : Any , snake_case : str=14 , snake_case : Dict=7 , snake_case : Any=True , snake_case : Any=True , snake_case : str=True , snake_case : List[str]=True , snake_case : int=True , snake_case : List[Any]=99 , snake_case : Optional[int]=32 , snake_case : str=5 , snake_case : int=4 , snake_case : str=37 , snake_case : Union[str, Any]="gelu" , snake_case : List[str]=0.1 , snake_case : Optional[int]=0.1 , snake_case : Tuple=512 , snake_case : int=16 , snake_case : Any=2 , snake_case : List[str]=0.02 , snake_case : List[Any]=3 , snake_case : str=4 , snake_case : Tuple=None , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = use_mc_token_ids
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
__UpperCamelCase = self.vocab_size - 1
def snake_case ( self : Optional[Any] ):
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = None
if self.use_mc_token_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = self.get_config()
__UpperCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def snake_case ( self : Any , snake_case : Optional[int] , snake_case : Any , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[Any] , *snake_case : Union[str, Any] ):
__UpperCamelCase = CTRLModel(config=snake_case )
model.to(snake_case )
model.eval()
model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
model(snake_case , token_type_ids=snake_case )
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def snake_case ( self : Any , snake_case : Tuple , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] , *snake_case : Union[str, Any] ):
__UpperCamelCase = CTRLLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def snake_case ( self : List[Any] , snake_case : int , snake_case : Dict , snake_case : Optional[Any] , snake_case : Any , *snake_case : str ):
__UpperCamelCase = self.num_labels
__UpperCamelCase = CTRLForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def snake_case ( self : Optional[int] ):
__UpperCamelCase = CTRLModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=snake_case , n_embd=37 )
def snake_case ( self : Union[str, Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Dict ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[int] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*snake_case )
def snake_case ( self : Tuple ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : List[Any] ):
pass
@slow
def snake_case ( self : Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = CTRLModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case ( self : List[Any] ):
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def snake_case ( self : Optional[int] ):
__UpperCamelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(snake_case )
__UpperCamelCase = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=snake_case ) # Legal the president is
__UpperCamelCase = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__UpperCamelCase = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].tolist() , snake_case )
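# A hedged usage sketch that mirrors the integration test above (it downloads the
# full `ctrl` checkpoint, so it is slow and assumes network access):
def demo_ctrl_generation():
    from transformers import CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    model = CTRLLMHeadModel.from_pretrained("ctrl")
    inputs = tokenizer("Legal the president is", return_tensors="pt")
    output_ids = model.generate(inputs["input_ids"], do_sample=False)  # greedy, deterministic
    print(tokenizer.decode(output_ids[0]))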
| 375 | 0 |
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 190 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
__SCREAMING_SNAKE_CASE = ComputeEnvironment.AMAZON_SAGEMAKER
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = '''ml.p3.2xlarge'''
__SCREAMING_SNAKE_CASE = '''accelerate_sagemaker_execution_role'''
__SCREAMING_SNAKE_CASE = '''hf-sm'''
__SCREAMING_SNAKE_CASE = '''us-east-1'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = '''accelerate-sagemaker-1'''
__SCREAMING_SNAKE_CASE = '''1.6'''
__SCREAMING_SNAKE_CASE = '''4.4'''
__SCREAMING_SNAKE_CASE = '''train.py'''
__SCREAMING_SNAKE_CASE = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
__SCREAMING_SNAKE_CASE = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
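# `_convert_nargs_to_dict` lives in accelerate; below is a rough sketch of the
# behavior this test relies on (flag/value pairing plus literal type inference).
# This is an approximation, not accelerate's actual implementation:
import ast


def convert_nargs_to_dict_sketch(nargs):
    out, key = {}, None
    for token in nargs:
        if token.startswith("--"):
            if key is not None:
                raise ValueError(f"flag {key!r} is missing a value")
            key = token[2:]
        else:
            if key is None:
                raise ValueError(f"value {token!r} has no preceding flag")
            try:
                out[key] = ast.literal_eval(token)  # "3" -> int, "5e-5" -> float, "False" -> bool
            except (ValueError, SyntaxError):
                out[key] = token  # plain strings such as "bert"
            key = None
    if key is not None:
        raise ValueError(f"flag {key!r} is missing a value")
    return out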
| 190 | 1 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
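    # Quick cross-check against the standard library (same combinations, same order):
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]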
| 515 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
print(F"""{solution() = }""")
| 515 | 1 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) by integrating x ** (num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
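# A quick sanity check against the closed-form gamma in the standard library:
if __name__ == "__main__":
    for z_value in (1.0, 2.5, 5.0):
        assert abs(gamma(z_value) - math.gamma(z_value)) < 1e-6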
| 24 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Union[str, Any] = text_seq_length
snake_case_ : Dict = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Union[str, Any] = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Tuple = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : List[str] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : str = text_seq_length
snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
snake_case_ : str = self.text_seq_length + self.image_seq_length
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : Optional[Any] = bbox[i, j, 3]
snake_case_ : Any = bbox[i, j, 1]
snake_case_ : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : str = bbox[i, j, 2]
snake_case_ : Dict = bbox[i, j, 0]
snake_case_ : Union[str, Any] = t
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Dict = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_labels
snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs

        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
return True
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def _A ( self :Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
"""simple docstring"""
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None
@slow
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([[1, 2]] )
snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
snake_case_ : Any = model(
input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
# verify the logits
snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
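# The per-element swap loop in `prepare_config_and_inputs` that makes each bbox
# "legal" (x0 <= x1, y0 <= y1) can also be written as a vectorized sketch:
def normalize_bboxes(bbox):
    # bbox: (..., 4) tensor of (x0, y0, x1, y1) coordinates
    xs = torch.sort(bbox[..., [0, 2]], dim=-1).values
    ys = torch.sort(bbox[..., [1, 3]], dim=-1).values
    return torch.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], dim=-1)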
| 653 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
lowerCAmelCase_ = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
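# A brief usage sketch (downloads a checkpoint, so network access is assumed):
if __name__ == "__main__":
    tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    enc = tok("first sentence", "second sentence")
    print(enc["input_ids"])
    print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second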
| 705 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
def __magic_name__( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __magic_name__( self ):
lowerCAmelCase__ : str = ort.SessionOptions()
lowerCAmelCase__ : List[Any] = False
return options
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowerCAmelCase__ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowerCAmelCase__ : List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''A red cat sitting on a park bench'''
lowerCAmelCase__ : List[Any] = np.random.RandomState(0 )
lowerCAmelCase__ : int = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCAmelCase , output_type='''np''' , )
lowerCAmelCase__ : Any = output.images
lowerCAmelCase__ : Any = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase__ : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowerCAmelCase__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowerCAmelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase__ : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : str = '''A red cat sitting on a park bench'''
lowerCAmelCase__ : Union[str, Any] = np.random.RandomState(0 )
lowerCAmelCase__ : Optional[Any] = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCAmelCase , output_type='''np''' , )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : Optional[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase__ : Optional[int] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
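# A small helper sketch for checking which execution providers the local
# onnxruntime build actually exposes before requesting CUDA:
def available_providers():
    import onnxruntime as ort

    return ort.get_available_providers()  # e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider']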
| 470 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
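# With the lazy pattern above, importing the package is cheap: heavy submodules are
# only imported on first attribute access. A rough illustration (assumes the real
# transformers package layout):
def demo_lazy_access():
    import transformers.models.clap as clap

    return clap.ClapProcessor  # attribute access triggers the `processing_clap` import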
| 348 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3, 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_00) = }""")
| 348 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures""")
class ImageProcessorUtilTester(unittest.TestCase):
def snake_case_ ( self ):
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
a_ : Dict = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=a_ ) as mock_head:
a_ : int = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self ):
# This test is for deprecated behavior and can be removed in v5
a_ : Optional[int] = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def snake_case_ ( self ):
with self.assertRaises(a_ ):
# config is in subfolder, the following should not work without specifying the subfolder
a_ : Tuple = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
a_ : List[Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(a_ )
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
@classmethod
def snake_case_ ( cls ):
a_ : int = TOKEN
HfFolder.save_token(a_ )
@classmethod
def snake_case_ ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def snake_case_ ( self ):
a_ : Dict = ViTImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
a_ : int = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a_ , repo_id="test-image-processor" , push_to_hub=a_ , use_auth_token=self._token )
a_ : Any = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def snake_case_ ( self ):
a_ : int = ViTImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
a_ : Dict = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a_ , repo_id="valid_org/test-image-processor-org" , push_to_hub=a_ , use_auth_token=self._token )
a_ : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def snake_case_ ( self ):
CustomImageProcessor.register_for_auto_class()
a_ : List[str] = CustomImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
a_ : str = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=a_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" ) | 370 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def snake_case_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
a_ : Any = SpeechTaTokenizer(a_ )
a_ : Optional[int] = AddedToken("<mask>" , lstrip=a_ , rstrip=a_ )
a_ : Any = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self , a_ ):
a_ : Tuple = "this is a test"
a_ : Any = "this is a test"
return input_text, output_text
def snake_case_ ( self , a_ , a_=False , a_=2_0 , a_=5 ):
a_ , a_ : Optional[Any] = self.get_input_output_texts(a_ )
a_ : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
a_ : Dict = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_ )
return text, ids
def snake_case_ ( self ):
a_ : List[Any] = "<pad>"
a_ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def snake_case_ ( self ):
a_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(a_ ) , 8_1 )
def snake_case_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
def snake_case_ ( self ):
a_ : Any = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
a_ : Dict = tokenizer.vocab_size
a_ : List[str] = len(a_ )
self.assertNotEqual(a_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a_ : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
a_ : int = tokenizer.add_tokens(a_ )
a_ : List[Any] = tokenizer.vocab_size
a_ : Tuple = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size + len(a_ ) )
a_ : str = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
a_ : Tuple = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
a_ : Dict = tokenizer.add_special_tokens(a_ )
a_ : Optional[Any] = tokenizer.vocab_size
a_ : Any = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size_a + len(a_ ) )
a_ : Any = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
a_ : Union[str, Any] = self.get_tokenizer()
a_ : Any = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(a_ , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
a_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
a_ : Tuple = tokenizer.convert_tokens_to_ids(a_ )
# fmt: off
self.assertListEqual(a_ , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
a_ : Tuple = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def snake_case_ ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=sequences , )
| 370 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
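# Tester that supplies the image-processor settings consumed by the GLPN test case below.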
class GLPNImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size_divisor=3_2 , do_rescale=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = GLPNImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : str ) -> Optional[Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size_divisor''' ) )
        self.assertTrue(hasattr(image_processing , '''resample''' ) )
        self.assertTrue(hasattr(image_processing , '''do_rescale''' ) )
def snake_case__ ( self : Any ) -> int:
pass
    def snake_case__ ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def snake_case__ ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def snake_case__ ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 262 |
'''simple docstring'''
import math
def real_power( apparent_power: float , power_factor: float ):
    """simple docstring"""
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * power_factor
def reactive_power( apparent_power: float , power_factor: float ):
    """simple docstring"""
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
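# Optional-dependency guard: expose the real pipelines only when torch and transformers
# are installed, otherwise fall back to dummy objects.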
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 717 |
def excel_title_to_column( column_title ):
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    # Read the title right-to-left as a base-26 number: 'A' -> 1 ... 'Z' -> 26.
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 652 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
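# Decorators that tag methods with the key(s) they handle; the KeyHandler metaclass
# below collects those tags into a key -> handler dispatch table.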
def mark( key: str ):
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += [key]
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
def mark_multiple( *keys: str ):
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += keys
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
class KeyHandler( type ):
    """simple docstring"""
    def __new__( cls , name , bases , attrs ):
        '''simple docstring'''
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 94 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
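# Converts a LAVIS BLIP-2 checkpoint to the Hugging Face layout, then checks that
# logits and generated captions match the original model.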
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config( model_name , eos_token_id ):
    image_size = 3_6_4 if '''coco''' in model_name else 2_2_4
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    eos_token_id = tokenizer('''\n''' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
        '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
        '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
        '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
        '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
        '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
        '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
    }
    lavis_name , model_type = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''' )
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=lavis_name , model_type=model_type , is_eval=True , device=device )
    original_model.eval()
    print('''Done!''' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''Qformer.bert''' ):
            key = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            key = key.replace('''self''' , '''attention''' )
        if "opt_proj" in key:
            key = key.replace('''opt_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            key = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''opt''' ):
            key = key.replace('''opt''' , '''language''' )
        if key.startswith('''t5''' ):
            key = key.replace('''t5''' , '''language''' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
    print('''Looks ok!''' )
    print('''Generating a caption...''' )
    prompt = ''''''
    input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids.to(device )
    original_outputs = original_model.generate({'''image''': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('''Original generation:''' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}" )
        hf_model.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 632 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
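# Converts an OpenAI DALL-E dVAE encoder checkpoint into the FLAVA image-codebook format.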
def rreplace( s , old , new , occurrence ):
    splits = s.rsplit(old , occurrence )
    return new.join(splits )
def count_parameters( state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict( state_dict ):
    upgrade = {}
    group_keys = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
        if "res_path" in key:
            key = key.replace('''res_path.''' , '''res_path.path.''' )
        if key.endswith('''.w''' ):
            key = rreplace(key , '''.w''' , '''.weight''' , 1 )
        if key.endswith('''.b''' ):
            key = rreplace(key , '''.b''' , '''.bias''' , 1 )
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 704 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
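# Casts every tensor in a saved state dict to fp16, roughly halving the checkpoint size on disk.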
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
| 430 | 0 |
def decimal_to_fraction( decimal: int | float | str ):
    """simple docstring"""
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("""Please enter a valid number""" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split(""".""" )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: after the loop, `divisor` holds the gcd of the two values.
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 225 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class SCREAMING_SNAKE_CASE :
    feature_extraction_class = None
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 225 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
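# Builds the atom14 <-> atom37 index maps and existence masks used by OpenFold-style
# protein feature dicts (14 atoms per residue type vs. the 37-atom global layout).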
def make_atom14_masks(protein: Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 1_4 )
    restype_atom37_to_atom14_list.append([0] * 3_7 )
    restype_atom14_mask_list.append([0.0] * 1_4 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein['aatype'].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein['aatype'].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein['aatype'].device , )
    protein_aatype = protein['aatype'].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([2_1, 3_7] , dtype=torch.float32 , device=protein['aatype'].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor] ):
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n , device=batch['aatype'].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
| 720 |
"""simple docstring"""
class OverFlowError( Exception ):
    """simple docstring"""
    pass
class UnderFlowError( Exception ):
    """simple docstring"""
    pass
class FixedPriorityQueue:
    """simple docstring"""
    def __init__( self ):
        """simple docstring"""
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority , data ):
        """simple docstring"""
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ):
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
"""simple docstring"""
return "\n".join(F"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    """simple docstring"""
    def __init__( self ):
        """simple docstring"""
        self.queue = []
    def enqueue( self , data ):
        """simple docstring"""
        if len(self.queue ) == 1_00:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ):
        """simple docstring"""
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self ):
"""simple docstring"""
return str(self.queue )
def fixed_priority_queue() -> None:
"""simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
"""simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 104 | 0 |
def solution(n: int = 60_08_51_47_51_43 ) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    i = 2
    ans = 0
    if n == 2:
        return 2
    # Trial division: divide out each factor completely; the last divisor found is the answer.
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"{solution() = }")
| 279 |
import tensorflow as tf
from ...tf_utils import shape_list
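# TF port of the adaptive softmax output layer used by Transformer-XL: the vocabulary is
# split into a head shortlist plus tail clusters defined by frequency cutoffs.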
class snake_case ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_projs = []
        self.out_layers = []
    def build( self , input_shape ):
        '''simple docstring'''
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=True , name="""cluster_weight""" )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) , initializer="""zeros""" , trainable=True , name="""cluster_bias""" )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    proj = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=True , name=f'''out_projs_._{i}''' , )
                    self.out_projs.append(proj )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._weight''' , )
                bias = self.add_weight(
                    shape=(self.vocab_size,) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=True , name=f'''out_projs_._{i}''' )
                self.out_projs.append(proj )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._weight''' , )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
@staticmethod
    def _logit( x , W , b , proj=None ):
        '''simple docstring'''
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
@staticmethod
    def _gather_logprob( logprob , target ):
        '''simple docstring'''
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ):
        '''simple docstring'''
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target , logits=output )
            out = tf.nn.log_softmax(output , axis=-1 )
        else:
            hidden_sizes = shape_list(hidden )
            out = []
            loss = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask )
                    cur_target = tf.boolean_mask(target , mask ) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight] , 0 )
                    cur_b = tf.concat([cur_b, self.cluster_bias] , 0 )
                    head_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[0] )
                    head_logprob = tf.nn.log_softmax(head_logit )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_head_logprob , cur_target )
                else:
                    tail_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[i] )
                    tail_logprob = tf.nn.log_softmax(tail_logit )
                    cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_tail_logprob = tf.boolean_mask(tail_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_tail_logprob , cur_target )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx , -cur_logprob , shape_list(loss ) )
            out = tf.concat(out , axis=-1 )
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss , name=self.name , aggregation="""mean""" if return_mean else """""" )
        return out
| 392 | 0 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
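# Helpers that introspect a test_modeling_*.py file to recover its test classes,
# model classes and model-tester classes.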
def get_module_path( test_file ):
    '''simple docstring'''
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            F'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace(".py" , "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module( test_file ):
    '''simple docstring'''
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes( test_file ):
    '''simple docstring'''
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module , attr))
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__)
def get_test_classes( test_file ):
    '''simple docstring'''
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module , attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        all_model_classes = getattr(test_class , "all_model_classes" , [])
        if len(all_model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes , key=lambda x: x.__name__)
def get_model_classes( test_file ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes , key=lambda x: x.__name__)
def get_model_tester_from_test_class( test_class ):
    '''simple docstring'''
    test = test_class()
    if hasattr(test , "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model( test_file , model_class ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes , key=lambda x: x.__name__)
def get_tester_classes_for_model( test_file , model_class ):
    '''simple docstring'''
    test_classes = get_test_classes_for_model(test_file , model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__)
def get_test_to_tester_mapping( test_file ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping( test_file ):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping( test_file ):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json( o ):
    '''simple docstring'''
    if isinstance(o , str):
        return o
    elif isinstance(o , type):
        return o.__name__
    elif isinstance(o , (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o , dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 435 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
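# The fast tests assemble tiny versions of every Stable unCLIP component so the whole
# pipeline can run on CPU; the integration tests below exercise the real checkpoints.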
class StableUnCLIPPipelineFastTests( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components( self ):
        '''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe("anime turtle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 435 | 1 |