| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""Convert MobileNetV1 checkpoints from the tensorflow/models library."""
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    # We verify the converted weights on a COCO image of two cats.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
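# Example invocation, a sketch with hypothetical local paths (the checkpoint is
# one of the TF MobileNetV1 checkpoints published with tensorflow/models):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt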
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
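# These are @slow integration tests; in a diffusers checkout they are skipped
# by default and only run when the RUN_SLOW environment variable is set. The
# test file path below is illustrative, not the canonical location:
#
#   RUN_SLOW=1 pytest tests/models/test_models_unet_2d_condition_flax.py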
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
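# Example invocation, a sketch with illustrative paths (expects an original
# CompVis-style LDM checkpoint plus its OmegaConf YAML config):
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_path ./ldm/config.yaml \
#       --output_path ./ldm-pipeline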
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on a linear congruence."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Generate the next pseudorandom number; the result is always in [0, modulo)."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
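# A deterministic sanity check, sketched with a fixed seed instead of time():
# 1664525 and 1013904223 are the classic Numerical Recipes LCG constants, and
# 2 << 31 == 2**32 is the usual power-of-two modulus.
#
#   lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
#   assert lcg.next_number() == 1013904223  # (1664525 * 0 + 1013904223) % 2**32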
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : str = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _UpperCAmelCase ( __a):
__a : int = """poolformer"""
def __init__( self , _A=3 , _A=16 , _A=16 , _A=3 , _A=4.0 , _A=[2, 2, 6, 2] , _A=[64, 1_28, 3_20, 5_12] , _A=[7, 3, 3, 3] , _A=[4, 2, 2, 2] , _A=[2, 1, 1, 1] , _A=4 , _A=0.0 , _A="gelu" , _A=True , _A=1e-5 , _A=0.02 , **_A , ) -> Any:
'''simple docstring'''
_UpperCAmelCase : int = num_channels
_UpperCAmelCase : Union[str, Any] = patch_size
_UpperCAmelCase : Dict = stride
_UpperCAmelCase : int = padding
_UpperCAmelCase : Optional[Any] = pool_size
_UpperCAmelCase : Any = hidden_sizes
_UpperCAmelCase : Any = mlp_ratio
_UpperCAmelCase : List[Any] = depths
_UpperCAmelCase : Dict = patch_sizes
_UpperCAmelCase : int = strides
_UpperCAmelCase : str = num_encoder_blocks
_UpperCAmelCase : str = drop_path_rate
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : Union[str, Any] = use_layer_scale
_UpperCAmelCase : Dict = layer_scale_init_value
_UpperCAmelCase : str = initializer_range
super().__init__(**_A )
class _UpperCAmelCase ( __a):
__a : List[Any] = version.parse("""1.11""")
@property
def __snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __snake_case ( self ) -> float:
'''simple docstring'''
return 2e-3
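# Minimal usage sketch: building a randomly initialized PoolFormer model from
# this configuration (assumes the surrounding transformers package is installed):
#
#   from transformers import PoolFormerConfig, PoolFormerModel
#   config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#   model = PoolFormerModel(config)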
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def UpperCamelCase ( _lowerCAmelCase : Dict, _lowerCAmelCase : int=(), _lowerCAmelCase : Union[str, Any]=None, _lowerCAmelCase : Union[str, Any]="no", _lowerCAmelCase : Optional[int]="29500" ) -> Any:
_UpperCAmelCase : Any = False
_UpperCAmelCase : Dict = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
_UpperCAmelCase : Union[str, Any] = True
elif "IPython" in sys.modules:
_UpperCAmelCase : Dict = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
_UpperCAmelCase : int = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""", _lowerCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
_UpperCAmelCase : List[Any] = 8
_UpperCAmelCase : int = PrepareForLaunch(_lowerCAmelCase, distributed_type="""TPU""" )
print(f'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(_lowerCAmelCase, args=_lowerCAmelCase, nprocs=_lowerCAmelCase, start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*_lowerCAmelCase )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_lowerCAmelCase, master_addr="""127.0.01""", master_port=_lowerCAmelCase, mixed_precision=_lowerCAmelCase ):
_UpperCAmelCase : Any = PrepareForLaunch(_lowerCAmelCase, distributed_type="""MULTI_GPU""" )
print(f'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(_lowerCAmelCase, args=_lowerCAmelCase, nprocs=_lowerCAmelCase, start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_UpperCAmelCase : Union[str, Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : List[str]=(), _lowerCAmelCase : Optional[int]=2 ) -> Tuple:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_lowerCAmelCase, master_addr="""127.0.01""", master_port="""29500""", accelerate_mixed_precision="""no""", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="""yes""", ):
_UpperCAmelCase : Tuple = PrepareForLaunch(_lowerCAmelCase, debug=_lowerCAmelCase )
start_processes(_lowerCAmelCase, args=_lowerCAmelCase, nprocs=_lowerCAmelCase, start_method="""fork""" )
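# Typical notebook usage, sketched: the training function must build its own
# Accelerator internally, and no cell may touch CUDA before launching.
#
#   def training_loop(mixed_precision="fp16"):
#       from accelerate import Accelerator
#       accelerator = Accelerator(mixed_precision=mixed_precision)
#       ...  # build dataloaders/model/optimizer and train here
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)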
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under a file lock so that output from concurrent processes doesn't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
"""Tokenization classes for CANINE."""
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) to an id (i.e. its integer codepoint value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode codepoint (integer) to a token (str). In case it's a special codepoint, convert to
        human-readable format.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CanineTokenizer has no vocab file to save.
        return ()
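# Minimal usage sketch: CANINE tokenizes into raw Unicode codepoints, so ids are
# just ord() values plus the private-use specials defined above (assumes the
# google/canine-s checkpoint is available on the Hub):
#
#   tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
#   enc = tokenizer("hello")
#   # enc["input_ids"] == [CLS, ord("h"), ord("e"), ord("l"), ord("l"), ord("o"), SEP]
#   assert enc["input_ids"][1:-1] == [ord(c) for c in "hello"]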
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
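# Example invocation, a sketch with illustrative paths (converts a v1.x
# checkpoint with the default PNDM scheduler):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./sd-v1-4.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./stable-diffusion-v1-4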
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_A = """bart"""
_A = True
@st.cache(allow_output_mutation=__UpperCAmelCase )
def lowercase_ ( ) -> Optional[Any]:
if LOAD_DENSE_INDEX:
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
lowerCAmelCase__ : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
lowerCAmelCase__ : List[str] = qar_model.eval()
else:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
lowerCAmelCase__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
lowerCAmelCase__ : List[str] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
lowerCAmelCase__ : Dict = sas_model.eval()
else:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def lowercase_ ( ) -> Union[str, Any]:
if LOAD_DENSE_INDEX:
lowerCAmelCase__ : Union[str, Any] = faiss.StandardGpuResources()
lowerCAmelCase__ : List[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
lowerCAmelCase__ : Dict = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
lowerCAmelCase__ : str = faiss.IndexFlatIP(128 )
lowerCAmelCase__ : Optional[int] = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase )
wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU
else:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (None, None)
lowerCAmelCase__ : Optional[int] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def lowercase_ ( ) -> List[str]:
lowerCAmelCase__ : List[Any] = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
lowerCAmelCase__ : Tuple = elia["""train_eli5"""]
lowerCAmelCase__ : Dict = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
lowerCAmelCase__ : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__UpperCAmelCase )
return (elia_train, eli5_train_q_index)
_A , _A , _A = load_indexes()
_A , _A , _A , _A = load_models()
_A , _A = load_train_data()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=10 ) -> Optional[Any]:
lowerCAmelCase__ : str = embed_questions_for_retrieval([question] , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = eli5_train_q_index.search(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = [elia_train[int(__UpperCAmelCase )] for i in I[0]]
return nn_examples
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="wiki40b" , __UpperCAmelCase="dense" , __UpperCAmelCase=10 ) -> List[str]:
if source == "none":
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=__UpperCAmelCase , )
lowerCAmelCase__ : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
lowerCAmelCase__ : Optional[Any] = """question: {} context: {}""".format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCAmelCase : None),
} )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=64 , __UpperCAmelCase=256 , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0.8 ) -> Optional[int]:
with torch.no_grad():
lowerCAmelCase__ : List[Any] = qa_sas_generate(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_A = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_A = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_A = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_A = st.sidebar.checkbox("""Demo options""")
if demo_options:
_A = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_A = action_list.index(action_st)
_A = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_A = show_type == """Show full text of passages"""
else:
_A = 3
_A = True
_A = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_A = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_A = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_A = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_A = """wiki40b"""
_A = """dense"""
_A = """beam"""
_A = 2
_A = 6_4
_A = 2_5_6
_A = None
_A = None
_A = st.sidebar.checkbox("""Generation options""")
if generate_options:
_A = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_A = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_A = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
_A = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
_A = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_A = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_A = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_A = None
# start main text
_A = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_A = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_A = st.text_input("""Enter your question here:""", """""")
else:
_A = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_A , _A = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
_A , _A = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
_A = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_A = support_list[:1_0]
_A = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_A , _A = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
_A , _A = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_A = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_A = res[1].strip()
if sec_titles == "":
_A = """[{}]({})""".format(res[0], wiki_url)
else:
_A = sec_titles.split(""" & """)
_A = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_A = find_nearest_training(question)
_A = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_A = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_A = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
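# This file is a Streamlit app, so it is started with the command below
# (filename illustrative; the memmapped index files and a local ElasticSearch
# instance on port 9200 must already exist):
#
#   streamlit run eli5_app.py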
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__UpperCAmelCase = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def _snake_case ( lowercase__ : List[str]=True ) -> List[str]:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Union[str, Any] = None
UpperCAmelCase_ :Optional[Any] = None
def __lowerCAmelCase ( self , __A , __A ) -> Dict:
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ :Dict = dataset_module_factory(__A , cache_dir=__A )
lowerCAmelCase_ :List[str] = import_main_class(dataset_module.module_path , dataset=__A )
lowerCAmelCase_ :DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
lowerCAmelCase_ :Any = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
lowerCAmelCase_ :Dict = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def _snake_case ( lowercase__ : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
lowerCAmelCase_ :List[Any] = dataset_module_factory("""wikipedia""" , cache_dir=lowercase__ )
lowerCAmelCase_ :Optional[int] = import_main_class(dataset_module.module_path )
lowerCAmelCase_ :DatasetBuilder = builder_cls(
cache_dir=lowercase__ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowerCAmelCase_ :Optional[int] = None
builder_instance.download_and_prepare()
lowerCAmelCase_ :Tuple = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def _snake_case ( lowercase__ : Dict ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = dataset_module_factory("""wikipedia""" , cache_dir=lowercase__ )
lowerCAmelCase_ :Any = import_main_class(dataset_module.module_path , dataset=lowercase__ )
lowerCAmelCase_ :DatasetBuilder = builder_cls(
cache_dir=lowercase__ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
lowerCAmelCase_ :Dict = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(lowercase__ , lowercase__ )
assert "train" in ds
assert isinstance(ds["""train"""] , lowercase__ )
assert next(iter(ds["""train"""] ) )
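# These integration tests hit the network; in a datasets checkout they are
# typically selected with pytest's marker filter (file path illustrative):
#
#   pytest -m integration tests/test_hf_gcp.py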
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
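# The registration flow exercised above, as a standalone sketch (CustomConfig
# stands for any PretrainedConfig subclass that defines model_type = "custom"):
#
#   AutoConfig.register("custom", CustomConfig)
#   config = AutoConfig.for_model("custom")
#   assert isinstance(config, CustomConfig)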
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the (row, column) pair of a letter in the Polybius square, 1-indexed."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the given 1-indexed (row, column) position of the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode a message; spaces are dropped and 'j' is folded into 'i'."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # First step: write the row/column coordinates of each letter in two rows.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Second step: read the coordinates back row by row, re-pairing them.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a message produced by `encode`."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
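# Round-trip sanity check, as a sketch: spaces are dropped and "j" folds into
# "i" on encode, so any j-free, space-free lowercase message survives a full
# encode/decode cycle.
#
#   cipher = BifidCipher()
#   secret = cipher.encode("testmessage")
#   assert cipher.decode(secret) == "testmessage"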
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase_ : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCamelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
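# Illustrative sketch of the lazy-import pattern used above (a toy re-implementation,
# not the actual `_LazyModule` from transformers): attribute access triggers the real
# import, so importing this package stays cheap until `MLukeTokenizer` is first touched.
#
#     import importlib, types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, attrs in self._import_structure.items():
#                 if attr in attrs:
#                     return getattr(importlib.import_module(submodule), attr)
#             raise AttributeError(attr)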
| 286
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the vision tower of a BridgeTower model."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288,
        initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True,
        remove_last_layer=False, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the text encoder of a BridgeTower model."""

    model_type = "bridgetower_text_model"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        initializer_factor=1, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1,
        layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    """Configuration for a full BridgeTower model, composed of a text and a vision sub-config."""

    model_type = "bridgetower"

    def __init__(
        self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768,
        initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add",
        num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs,
    ):
        # Legacy keys from older Hub files; their values are intentionally discarded.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
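# Usage sketch: compose the full config from the two sub-configs defined above and
# round-trip it through `to_dict`.
if __name__ == "__main__":
    text_config = BridgeTowerTextConfig(num_hidden_layers=2)
    vision_config = BridgeTowerVisionConfig(num_hidden_layers=2)
    config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
    assert config.to_dict()["text_config"]["num_hidden_layers"] == 2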
| 149
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n"
            "    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n"
            "        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            'def function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\n'
            "class FakeClass(metaclass=DummyObject):\n"
            '    _backends = ["torch"]\n\n'
            "    def __init__(self, *args, **kwargs):\n"
            '        requires_backends(self, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
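# A rough, self-contained sketch of the backend-detection idea that `find_backend`
# implements (the real version in utils/check_dummies.py uses stricter regexes
# against the exact `_import_structure` layout of transformers' __init__.py):
import re

_re_backend_sketch = re.compile(r"is_([a-z_]*)_available\(\)")


def find_backend_sketch(line: str):
    # A line declares a backend guard only if it is an `if not ...` availability check.
    backends = _re_backend_sketch.findall(line)
    if "if not" not in line or not backends:
        return None
    return "_and_".join(backends)


assert find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"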
| 149
| 1
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5,
        is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads,
            window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify that a ValueError is raised for an unknown output feature
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("Swin does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
pass
@unittest.skip("Swin does not support feedforward chunking")
def _SCREAMING_SNAKE_CASE ( self : List[str]):
pass
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[Any] = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _SCREAMING_SNAKE_CASE ( self : Any):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _SCREAMING_SNAKE_CASE ( self : str):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _SCREAMING_SNAKE_CASE ( self : Any):
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
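# Usage sketch (dummy input, shapes are illustrative): run the backbone on a random
# batch and list the per-stage channel counts of the returned feature maps.
if __name__ == "__main__":
    config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
    backbone = MaskFormerSwinBackbone(config).eval()
    with torch.no_grad():
        features = backbone(torch.randn(1, 3, 224, 224)).feature_maps
    print([f.shape[1] for f in features])  # one channel count per requested stage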
| 13
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample",
            use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
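# Usage sketch (needs a GPU and network access to fetch `openai/shap-e`): the same
# pipeline as in the tests above can export its multi-view output as a GIF.
#
#     from diffusers.utils import export_to_gif
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     images = pipe("a shark", guidance_scale=15.0, frame_size=64, output_type="pil").images[0]
#     export_to_gif(images, "shark_3d.gif")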
| 13
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ : Any = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilingualTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
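# Usage sketch (downloads the checkpoint): once `tgt_lang` is set, the language
# code is prepended and the EOS token appended automatically.
if __name__ == "__main__":
    mustc_tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
    mustc_tokenizer.tgt_lang = "fr"
    ids = mustc_tokenizer("C'est trop cool").input_ids
    print(ids[0] == FR_CODE, ids[-1] == mustc_tokenizer.eos_token_id)  # True True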
| 355
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4,
        hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True,
        intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes,
            depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40,
            auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
            loss_ignore_index=255, num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
"""simple docstring"""
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
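# Usage sketch: turn the raw logits from the integration tests above into a
# per-pixel class map; `post_process_semantic_segmentation` also resizes back to
# the original image size.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    image = prepare_img()
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(seg_map.shape)  # (height, width) tensor of predicted ADE20K labels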
| 166
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 26
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
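# Illustrative example: flatten_yaml_as_dict turns a nested mapping such as
#   {"model": {"classification": {"name": "mobilevit_v2"}}}
# into {"model.classification.name": "mobilevit_v2"}, which is why the code below
# looks up dotted attribute names on the returned Namespace via getattr.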
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
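# Illustrative example of one rename produced above: an original checkpoint key
#   "layer_3.1.local_rep.0.block.conv.weight"
# ends up as
#   "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight".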
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
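# Example invocation (illustrative; the script and file names are assumptions):
#   python convert_mobilevitv2_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256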
import mpmath # for roots of unity
import numpy as np
class FFT:
    """
    Fast Polynomial Multiplication: multiply two polynomials (given as
    coefficient lists) via a radix-2 fast Fourier transform.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete Fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and find A*B via the inverse transform
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
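# Worked example (not in the original module): multiplying A(x) = 1 + 2x + 3x^2
# by B(x) = 4 + 5x gives A*B = 4 + 13x + 22x^2 + 15x^3, i.e.
#   FFT([1, 2, 3], [4, 5]).product  # [4, 13, 22, 15] (as rounded complex values)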
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
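# Usage note (illustrative): Mask2FormerConfig() builds a config with the default Swin
# backbone shown above, while Mask2FormerConfig.from_backbone_config(swin_config)
# reuses an existing backbone configuration instead.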
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}

        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
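# Typical flow (a sketch, assuming the surrounding example scripts): trainer.calibrate()
# runs a few forward passes to collect quantization ranges, trainer.evaluate() then
# measures the calibrated accuracy, and trainer.save_onnx() exports the fake-quantized
# graph for deployment.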
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        "Compute validation"
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)

        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
parser.add_argument(
'''--task_type''' , default='''NER''' , type=UpperCamelCase_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=UpperCamelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=UpperCamelCase_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=UpperCamelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
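# Example invocation (illustrative; the script name and data layout are assumptions):
#   python run_ner.py --data_dir ./data --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_predict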
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers with the bucket sort algorithm."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
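# Additional example (not in the original): floats work too, since bucket indices are
# computed from the value range, e.g. bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2].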
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
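# Note (illustrative): a pa.ArrowInvalid mentioning "straddling" means a JSON object was
# cut by the chunk boundary; doubling block_size lets pyarrow see the whole object,
# which is why the inner loop above retries instead of failing immediately.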
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_a = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_a = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_a = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
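# Quick sanity check (illustrative), computed directly with scikit-learn:
#   from sklearn.metrics import matthews_corrcoef
#   matthews_corrcoef([1, 1, 0, 0], [1, 0, 0, 0])  # ~0.577 (TP=1, TN=2, FP=0, FN=1)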
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
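# Note (illustrative): at runtime only one branch is taken; either the TYPE_CHECKING
# imports above (for static analysis) or the _LazyModule, which imports the submodules
# listed in _import_structure lazily, on first attribute access.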
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_351_563,
"num_examples": 10_000,
},
{
"name": "validation",
"num_bytes": 238_418,
"num_examples": 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            result == expected
import os
def solution():
    """Returns the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
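# num.txt (from Project Euler problem 13) holds one 50-digit number per line and must
# sit next to this script; the answer is the first ten digits of the grand total.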
if __name__ == "__main__":
print(solution())
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
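

# A quick sanity check of the `feed_forward_proj` parsing above -- a minimal
# sketch using the public `transformers` API rather than this module's relative
# imports; it is an illustrative addition, not part of the original file.
if __name__ == "__main__":
    from transformers import T5Config

    # "gated-gelu" is split on "-": the activation is flagged as gated, and the
    # backwards-compatibility branch rewrites the dense activation to "gelu_new".
    config = T5Config(feed_forward_proj="gated-gelu")
    print(config.dense_act_fn, config.is_gated_act)  # gelu_new True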
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns a list of all the ways target can be constructed by
    concatenating elements of word_bank.
    """
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the index of char in pattern, scanning from the end; -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the text index of the first mismatch (scanning the pattern
        right to left), or -1 if the pattern fully matches at current_pos."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="eng_Latn",
        tgt_texts=None,
        tgt_lang="fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
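

# A minimal usage sketch (an illustrative addition, not part of the original
# module): loading the published NLLB checkpoint exercises the language-code
# handling above. Requires network access to download the tokenizer files.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    # src_lang selects the language-code token that set_src_lang_special_tokens()
    # prepends (or appends, under legacy_behaviour) to every encoded sequence.
    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    encoded = tokenizer("Hello, world!", return_tensors="pt")
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"][0]))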
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at board[row][column],
    given the queens already placed on the board.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Builds the state space tree row by row, backtracking whenever
    is_safe() rejects a position.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Prints a board with queens marked as Q and empty squares as dots."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculates the built-in voltage of a pn junction diode.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
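

# Illustrative usage sketch (an addition; the concentrations below, in cm^-3,
# are made up for the example and not taken from a datasheet):
if __name__ == "__main__":
    v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
    print(f"Built-in voltage: {v_bi:.3f} V")  # roughly 0.8 V at T = 300 K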
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__A = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
__A = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
__A = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
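

# A minimal usage sketch (an illustrative addition; the class name follows the
# rename above, and a random array stands in for a real image):
if __name__ == "__main__":
    processor = MobileNetV2ImageProcessor()
    image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    batch = processor(images=image, return_tensors="np")
    # Resized so the short side is 256, then center-cropped to 224x224.
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)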
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
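

# A minimal usage sketch of RegressionDataset (an illustrative addition): the
# default collate_fn stacks the float32 scalars into 1-D tensors per batch.
if __name__ == "__main__":
    dataset = RegressionDataset(a=2, b=3, length=8, seed=0)  # y is roughly 2x + 3 plus noise
    loader = DataLoader(dataset, batch_size=4)
    for batch in loader:
        print(batch["x"].shape, batch["y"].shape)  # torch.Size([4]) torch.Size([4])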
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
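# Hedged usage sketch, not part of the original file: two 8-connected islands
# in a 3x4 grid, so count_islands() returns 2.
#
# graph = [
#     [1, 1, 0, 0],
#     [0, 1, 0, 0],
#     [0, 0, 0, 1],
# ]
# print(Graph(3, 4, graph).count_islands())  # -> 2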
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    """Configuration for BigBird models (defaults mirror google/bigbird-roberta-base)."""

    model_type = "big_bird"
    def __init__(
        self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True,
        rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
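# Hedged usage sketch, not part of the original file: "block_sparse" (the
# default) and "original_full" are the two attention modes BigBird supports.
#
# config = BigBirdConfig(attention_type="original_full")
# print(config.hidden_size, config.attention_type)  # -> 768 original_full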
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
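    # Hedged usage sketch, not part of the original file. query(left, right) is
    # half-open, so query(1, 3) sums indices 1 and 2.
    # f = FenwickTree(arr=[1, 2, 3, 4])
    # print(f.prefix(4))    # -> 10
    # print(f.query(1, 3))  # -> 5
    # f.add(0, 5)
    # print(f.get_array())  # -> [6, 2, 3, 4]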
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a BlenderbotSmall tokenizer based on BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__",
        unk_token="__unk__", pad_token="__null__", **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)

            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)

        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
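# Hedged usage sketch, not part of the original file: the published 90M
# checkpoint ships the vocab/merges files this tokenizer expects.
#
# tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# print(tokenizer.tokenize("Sample sentence."))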
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Prim's algorithm should recover the known MST of this 9-node graph."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, min_depth=8,
        tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6",
        classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True,
        num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be called directly; moves the cursor either up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
        if in_colab:
            forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
        else:
            forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
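# Hedged usage sketch, not part of the original file: in an interactive
# terminal, `run()` draws the choices and blocks until enter is pressed.
# `BulletMenu` is the class name restored above.
#
# menu = BulletMenu("Which framework?", ["pytorch", "tensorflow", "flax"])
# selected_index = menu.run(default_choice=0)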
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
        hidden_act="relu", num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
        use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False,
        use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1,
        use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1,
        auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
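# Hedged usage sketch, not part of the original file: because these classes are
# thin aliases over the TF T5 implementations, loading goes through the usual
# `from_pretrained` API ("google/mt5-small" is a published checkpoint).
#
# model = TFMT5Model.from_pretrained("google/mt5-small")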
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def UpperCAmelCase_ ( self : Any ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def UpperCAmelCase_ ( self : str ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase =AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
__UpperCamelCase =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build model inputs from a conversation by concatenating all turns."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
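# Hedged usage sketch, not part of the original file: loading the fast
# tokenizer from the checkpoint referenced in the maps above.
#
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# print(tokenizer("Hello world")["input_ids"])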
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    """Count, for every attainable total, how many ways the dice can produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    dice_throw = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(dice_throw, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True,
        use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
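# A minimal inference sketch mirroring the integration test above (illustrative;
# the tensor of token ids is hard-coded rather than produced by a tokenizer):
#
#   model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
#   model.eval()
#   input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
#   with torch.no_grad():
#       positions = model(input_ids)["positions"]  # shape (8, batch, seq_len, 14, 3)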
| 125
| 0
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Given any two of resistance, reactance and impedance (pass the unknown one as 0),
    compute the missing quantity from the relation Z**2 = R**2 + X**2.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
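    # Worked example (illustrative): a 3-4-5 right triangle of resistance and
    # reactance yields an impedance of 5 ohms, and the relation inverts cleanly.
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}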
| 273
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    """Build one dummy input per requested input type."""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""")
    return inputs
def output_types(outputs: List):
    """Map each produced output to its declared agent type name."""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(F"""Invalid output: {output}""")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 273
| 1
|
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning-rate scheduler that only steps it when the wrapped
    optimizers actually performed a step, i.e. when gradients were synchronized.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
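# A minimal usage sketch (illustrative; assumes `torch` is available and that an
# Accelerator has been constructed so AcceleratorState is already initialized):
#
#   import torch
#   param = torch.nn.Parameter(torch.zeros(1))
#   optimizer = torch.optim.SGD([param], lr=0.1)
#   scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#   wrapped = AcceleratedScheduler(scheduler, optimizer)
#   # wrapped.step() now only advances the LR once gradients have synced.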
| 104
|
"""simple docstring"""
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 81
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="""Model not available yet""")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'{output[:, 1:4, 1:4]}')
| 361
|
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("""nielsr/rvlcdip-demo""")
        image = dataset["""train"""][0]["""image"""].convert("""RGB""")
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 187
| 0
|
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
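    # Worked example (illustrative): the mean of 1, 2 and 3 is (1 + 2 + 3) / 3.
    print(mean([1, 2, 3]))  # 2.0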
| 77
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
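# Illustrative round-trip with the toy vocabulary above (derived from the test):
# with these merges, "readapt" splits into ["re@@", "adapt"], whose ids are [1, 0];
# any piece outside the vocabulary falls back to <unk> (id 6).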
| 207
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''size'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
| 350
|
"""simple docstring"""
def solution() -> int:
    """Return the product a*b*c of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
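    # Worked check (illustrative): the unique triplet is a=200, b=375, c=425,
    # since 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 180625 = 425**2,
    # so solution() == 200 * 375 * 425 == 31875000.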
| 24
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
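# Minimal end-to-end sketch (illustrative; mirrors the integration test above):
#
#   unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]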
| 304
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''embed_dim'''))
        self.parent.assertTrue(hasattr(config, '''num_heads'''))
class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''Cvt does not output attentions''')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 304
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 57
|
"""simple docstring"""
import numpy as np
import datasets
__A : Optional[int] = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__A : Any = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__A : List[str] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence") , id="X"),
}) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 57
| 1
|
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list) -> Node:
    """Create a linked list from the given sequence and return its head node."""
    if not elements_list:
        raise Exception('The Elements List is empty')
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node) -> None:
    """Recursively print the list's elements in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod
    testmod()
    head = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(head)
    print('Elements in Reverse:')
    print_reverse(head)
if __name__ == "__main__":
main()
| 138
|
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
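    # Expected output of the call above (illustrative): words longer than four
    # characters are reversed, so "wollef sroirraw" becomes "fellow warriors":
    #   reverse_long_words("Hey wollef sroirraw")  ->  "Hey fellow warriors"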
| 138
| 1
|
'''simple docstring'''
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
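
# A minimal sketch of how a table like this can be consumed: each value is a
# full pip requirement specifier, so it can be checked against the installed
# distribution (`check_pinned` is an illustrative helper, not part of the
# library's API):
def check_pinned(name: str) -> bool:
    from importlib.metadata import version

    from packaging.requirements import Requirement

    req = Requirement(deps[name])
    return version(req.name) in req.specifier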
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict: bool = True , train: bool = False , ):
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlock2D ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNet2DConditionOutput(sample=sample )
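
# A minimal usage sketch of the model above (the default config is the full
# Stable Diffusion UNet, so initialization is expensive; illustrative only):
if __name__ == "__main__":
    unet = FlaxUNet2DConditionModel()
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, unet.in_channels, unet.sample_size, unet.sample_size))
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 77, unet.cross_attention_dim))
    out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
    print(out.sample.shape)  # (1, 4, 32, 32)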
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f'Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` '
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`.")
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f'Device with string identifier {self.device} not listed among the available '
                f'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
                f'device: {str(jax.devices()[0])}.')
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
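
# In practice this formatter is selected through `Dataset.with_format("jax")`
# rather than instantiated directly; a minimal sketch:
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
    print(type(ds[0]["x"]))  # a jax.Array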
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    li = s.rsplit(old, occurrence)
    return new.join(li)
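
# Quick check of the right-to-left replace above (a sketch): only the last
# occurrence of ".w" is rewritten, so "blocks.w.0.w" keeps its inner ".w".
assert rreplace("blocks.w.0.w", ".w", ".weight", 1) == "blocks.w.0.weight"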
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
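
# Example invocation (a sketch; the script name and paths are illustrative):
#   python convert_flava_codebook.py --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-codebook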
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
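
# What the lazy module buys you (a sketch): importing the package is cheap,
# and a heavy submodule is only executed on first attribute access, e.g.
#   from transformers.models.llama import LlamaConfig  # triggers configuration_llama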
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__UpperCAmelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
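
# A minimal sketch of the public API re-exported above:
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train")
#   print(ds[0]["text"][:80])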
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve for the quantity passed as 0 in the Casimir force relation
    F = (ℏ * c * π² * A) / (240 * d⁴) and return it in a dict.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
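
    # A worked example (a sketch): two parallel plates of area 1 m^2 separated
    # by 1 micrometre attract with a Casimir force of roughly 1.3e-3 N.
    print(casimir_force(force=0, area=1.0, distance=1e-6))  # {'force': ~0.0013}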
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
a : int = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
a : List[str] = INVOICE_URL
a : List[Any] = "What is the invoice number?"
a : int = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
a : List[str] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
a : Optional[int] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : int = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , )
a : List[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : List[Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
a : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
a : int = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : Tuple = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , max_seq_len=50 , )
a : List[str] = INVOICE_URL
a : Union[str, Any] = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
a : List[str] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
a : List[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Any = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
a : Tuple = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
pass
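
# A minimal usage sketch of the pipeline exercised above (weights download on
# first use; the model id is one of the checkpoints tested here):
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))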
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)
    # id2label
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")
        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g.: seg_head.aux_head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.idalabel[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
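
# Example invocation (a sketch; the script name and paths are illustrative):
#   python convert_mobilevitv2.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256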
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : int = type
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def __A ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = LayoutLMvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return LayoutLMvaImageProcessor(apply_ocr=a__ ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(a__ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=a__ , return_tensors="""pt""" ).pixel_values.to(a__ )
_lowerCAmelCase : Optional[Any] = torch.tensor([[1, 2]] )
_lowerCAmelCase : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCAmelCase : str = model(
input_ids=input_ids.to(a__ ) , bbox=bbox.to(a__ ) , pixel_values=pixel_values.to(a__ ) , )
# verify the logits
_lowerCAmelCase : Optional[int] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a__ )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4 ) )
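# Note: a short reader's sketch of the inputs exercised by the integration test
# above; the checkpoint name comes from the test itself, everything else is
# illustrative. LayoutLMv3 takes one (x0, y0, x1, y1) box per text token, with
# coordinates normalized to the 0-1000 range, plus the page image:
#
#   from transformers import AutoProcessor, LayoutLMv3Model
#
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")
#   outputs = model(**encoding)
#
# The hidden-state length of 199 checked above is 2 text tokens + 197 visual
# tokens (a CLS-like token plus 14 x 14 = 196 patches at 224 x 224 resolution).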
| 126
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _snake_case ( lowercase_ ):
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a__ , "width_multiplier" ) )
class _snake_case :
def __init__( self , a__ , a__=13 , a__=64 , a__=2 , a__=3 , a__="swish" , a__=3 , a__=32 , a__=0.1 , a__=0.0_2 , a__=True , a__=True , a__=10 , a__=None , a__=0.2_5 , a__=0.0 , a__=0.0 , ) -> Dict:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = make_divisible(512 * width_multiplier , divisor=8 )
snake_case_ = hidden_act
snake_case_ = conv_kernel_size
snake_case_ = output_stride
snake_case_ = classifier_dropout_prob
snake_case_ = use_labels
snake_case_ = is_training
snake_case_ = num_labels
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = width_multiplier
snake_case_ = ffn_dropout
snake_case_ = attn_dropout
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> int:
'''simple docstring'''
snake_case_ = MobileViTVaModel(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = self.num_labels
snake_case_ = MobileViTVaForImageClassification(a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> List[str]:
'''simple docstring'''
snake_case_ = self.num_labels
snake_case_ = MobileViTVaForSemanticSegmentation(a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case_ = model(a__ , labels=a__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : int = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Dict = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : int = False
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = MobileViTVaModelTester(self )
snake_case_ = MobileViTVaConfigTester(self , config_class=a__ , has_text_modality=a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(a__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
def check_hidden_states_output(a__ , a__ , a__ ):
snake_case_ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(a__ , a__ ) )
snake_case_ = outputs.hidden_states
snake_case_ = 5
self.assertEqual(len(a__ ) , a__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case_ = 2
for i in range(len(a__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(a__ , a__ , a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a__ )
@slow
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = MobileViTVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
a__ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**a__ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a__ )
snake_case_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
@slow
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = model.to(a__ )
snake_case_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**a__ )
snake_case_ = outputs.logits
# verify the logits
snake_case_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , a__ )
snake_case_ = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=a__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a__ , atol=1e-4 ) )
@slow
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = model.to(a__ )
snake_case_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**a__ )
snake_case_ = outputs.logits.detach().cpu()
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=a__ , target_sizes=[(50, 60)] )
snake_case_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , a__ )
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=a__ )
snake_case_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , a__ )
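# Note on `make_divisible` used in the tester above: assuming the standard
# MobileNet rounding rule (round to the nearest multiple of `divisor`, never
# dropping below 90% of the input), two quick worked examples:
#
#   make_divisible(512 * 0.25, divisor=8)  # 128.0 -> 128 (already a multiple of 8)
#   make_divisible(512 * 0.10, divisor=8)  # 51.2 -> 48 (int(51.2 + 4) // 8 * 8 = 48, and 48 >= 0.9 * 51.2)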
| 85
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _snake_case :
def __init__( self , prompt = None , choices = [] ):
'''simple docstring'''
self.position = 0
self.choices = choices
self.prompt = prompt
if sys.platform == "win32":
self.arrow_char = "*"
else:
self.arrow_char = "➔ "
def lowerCAmelCase__ ( self , a__ , a__ = "" ) -> int:
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , a__ )
else:
forceWrite(self.choices[index] , a__ )
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(a__ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def move_direction( self , direction , num_spaces = 1 ):
'''simple docstring'''
old_position = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(old_position )
move_cursor(num_spaces , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(a__ )] for number in range(10 )] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = int(chr(self.current_selection ) )
snake_case_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , a__ )
else:
return
else:
return
def run( self , default_choice = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
self.position = default_choice
for i in range(len(self.choices ) ):
self.print_choice(i )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
choice = int(builtins.input() )
except ValueError:
choice = default_choice
else:
choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(choice , "\n" )
return choice
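# A minimal usage sketch, assuming this is accelerate's interactive bullet menu
# (upstream calls the class `BulletMenu`; the name `_snake_case` here is an
# artifact of the obfuscation). `run` blocks until the user picks an entry with
# the arrow/number keys and Enter, then returns the selected index:
#
#   menu = _snake_case("Which device do you want to use?", ["cpu", "cuda", "mps"])
#   index = menu.run(default_choice=0)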
| 85
| 1
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase = "cpu" , __lowerCAmelCase = None ):
"""simple docstring"""
snake_case__ : Union[str, Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location=__SCREAMING_SNAKE_CASE )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
snake_case__ : Union[str, Any] = v.half()
if save_path is None: # overwrite src_path
snake_case__ : Tuple = src_path
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
fire.Fire(convert)
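# Usage sketch: python-fire maps CLI arguments onto `convert`'s signature, so a
# typical invocation (the file name is hypothetical) is
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Omitting --save_path halves the checkpoint in place, overwriting the source file.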
| 369
|
def match_pattern ( input_string: str , pattern: str ) -> bool:
"""simple docstring"""
len_string = len(input_string ) + 1
len_pattern = len(pattern ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
# since a string of zero length matches a pattern of zero length
dp[0][0] = 1
# since a pattern of zero length will never match a string of non-zero length
for i in range(1 , len_string ):
dp[i][0] = 0
# since a string of zero length will match a pattern where there
# is at least one * alternatively
for j in range(1 , len_pattern ):
dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , len_string ):
for j in range(1 , len_pattern ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
dp[i][j] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
dp[i][j] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
dp[i][j] = dp[i - 1][j]
else:
dp[i][j] = 0
else:
dp[i][j] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = '''aab'''
pattern = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 44
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__A : str = logging.get_logger(__name__)
@add_end_docstrings(
a_ , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class A_ (a_ ):
def _lowercase ( self , _A ):
'''simple docstring'''
if self.framework == "tf":
UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_A )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.get_masked_index(_A )
UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _lowercase ( self , _A ):
'''simple docstring'''
if isinstance(_A , _A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_A )
def _lowercase ( self , _A , _A=None , **_A ):
'''simple docstring'''
if return_tensors is None:
UpperCAmelCase = self.framework
UpperCAmelCase = self.tokenizer(_A , return_tensors=_A )
self.ensure_exactly_one_mask_token(_A )
return model_inputs
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.model(**_A )
UpperCAmelCase = model_inputs['''input_ids''']
return model_outputs
def _lowercase ( self , _A , _A=5 , _A=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase = target_ids.shape[0]
UpperCAmelCase = model_outputs['''input_ids'''][0]
UpperCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase = outputs.numpy()
UpperCAmelCase = outputs[0, masked_index, :]
UpperCAmelCase = stable_softmax(_A , axis=-1 )
if target_ids is not None:
UpperCAmelCase = tf.gather_nd(tf.squeeze(_A , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase = tf.expand_dims(_A , 0 )
UpperCAmelCase = tf.math.top_k(_A , k=_A )
UpperCAmelCase , UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase = outputs[0, masked_index, :]
UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase = probs[..., target_ids]
UpperCAmelCase , UpperCAmelCase = probs.topk(_A )
UpperCAmelCase = []
UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase = target_ids[p].tolist()
UpperCAmelCase = p
# Filter padding out:
UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase = self.tokenizer.decode(_A , skip_special_tokens=_A )
UpperCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_A )
result.append(_A )
if single_mask:
return result[0]
return result
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
if isinstance(_A , _A ):
UpperCAmelCase = [targets]
try:
UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase = {}
UpperCAmelCase = []
for target in targets:
UpperCAmelCase = vocab.get(_A , _A )
if id_ is None:
UpperCAmelCase = self.tokenizer(
_A , add_special_tokens=_A , return_attention_mask=_A , return_token_type_ids=_A , max_length=1 , truncation=_A , )['''input_ids''']
if len(_A ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase = list(set(_A ) )
if len(_A ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
UpperCAmelCase = np.array(_A )
return target_ids
def _lowercase ( self , _A=None , _A=None ):
'''simple docstring'''
UpperCAmelCase = {}
if targets is not None:
UpperCAmelCase = self.get_target_ids(_A , _A )
UpperCAmelCase = target_ids
if top_k is not None:
UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _A , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = super().__call__(_A , **_A )
if isinstance(_A , _A ) and len(_A ) == 1:
return outputs[0]
return outputs
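# A minimal usage sketch of this pipeline through the public API (the checkpoint
# name is just a common example):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3)
#   # -> [{"score": ..., "token": ..., "token_str": "capital", "sequence": ...}, ...]
#
#   # `targets` restricts scoring to the given candidate tokens, as implemented
#   # in `get_target_ids` above:
#   unmasker("Paris is the [MASK] of France.", targets=["capital", "center"])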
| 273
|
from datetime import datetime
import requests
def download_video ( url: str ) -> bytes:
'''simple docstring'''
base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(video_url ).content
if __name__ == "__main__":
url = input("Enter Video/IGTV url: ").strip()
file_name = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 273
| 1
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = model.config
__UpperCAmelCase : int = DonutSwinConfig(
image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
__UpperCAmelCase : List[str] = MBartConfig(
is_decoder=snake_case__, is_encoder_decoder=snake_case__, add_cross_attention=snake_case__, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
model.decoder.tokenizer ), scale_embedding=snake_case__, add_final_layer_norm=snake_case__, )
return encoder_config, decoder_config
def _UpperCamelCase ( snake_case__ ) -> Any:
if "encoder.model" in name:
__UpperCAmelCase : Optional[Any] = name.replace("encoder.model", "encoder" )
if "decoder.model" in name:
__UpperCAmelCase : Optional[int] = name.replace("decoder.model", "decoder" )
if "patch_embed.proj" in name:
__UpperCAmelCase : List[str] = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__UpperCAmelCase : Optional[Any] = name.replace("patch_embed.norm", "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
__UpperCAmelCase : Tuple = "encoder." + name
if "attn.proj" in name:
__UpperCAmelCase : List[str] = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name and "mask" not in name:
__UpperCAmelCase : Optional[Any] = name.replace("attn", "attention.self" )
if "norm1" in name:
__UpperCAmelCase : Dict = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
__UpperCAmelCase : int = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
__UpperCAmelCase : List[Any] = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
__UpperCAmelCase : List[Any] = name.replace("mlp.fc2", "output.dense" )
if name == "encoder.norm.weight":
__UpperCAmelCase : Any = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
__UpperCAmelCase : Any = "encoder.layernorm.bias"
return name
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Union[str, Any] = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__UpperCAmelCase : Union[str, Any] = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[3] )
__UpperCAmelCase : str = int(key_split[5] )
__UpperCAmelCase : List[Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__UpperCAmelCase : Optional[Any] = val[:dim, :]
__UpperCAmelCase : List[Any] = val[dim : dim * 2, :]
__UpperCAmelCase : Optional[Any] = val[-dim:, :]
else:
__UpperCAmelCase : str = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : Optional[int] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__UpperCAmelCase : int = val
return orig_state_dict
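# What the "qkv" branch above does, on a toy tensor: the original Swin encoder
# stores query/key/value as a single fused projection of shape (3 * dim, dim),
# while the HF model expects three separate (dim, dim) matrices. A minimal
# sketch of that split (the dim value is hypothetical):
#
#   dim = 4
#   qkv_weight = torch.randn(3 * dim, dim)
#   q, k, v = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
#
# Biases follow the same slicing, just without the second axis.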
def _UpperCamelCase ( snake_case__, snake_case__=None, snake_case__=False ) -> Dict:
# load original model
__UpperCAmelCase : int = DonutModel.from_pretrained(snake_case__ ).eval()
# load HuggingFace model
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_configs(snake_case__ )
__UpperCAmelCase : Dict = DonutSwinModel(snake_case__ )
__UpperCAmelCase : Tuple = MBartForCausalLM(snake_case__ )
__UpperCAmelCase : str = VisionEncoderDecoderModel(encoder=snake_case__, decoder=snake_case__ )
model.eval()
__UpperCAmelCase : str = original_model.state_dict()
__UpperCAmelCase : int = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify results on scanned document
__UpperCAmelCase : List[Any] = load_dataset("hf-internal-testing/example-documents" )
__UpperCAmelCase : str = dataset["test"][0]["image"].convert("RGB" )
__UpperCAmelCase : Dict = XLMRobertaTokenizerFast.from_pretrained(snake_case__, from_slow=snake_case__ )
__UpperCAmelCase : List[str] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] )
__UpperCAmelCase : List[str] = DonutProcessor(snake_case__, snake_case__ )
__UpperCAmelCase : int = processor(snake_case__, return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__UpperCAmelCase : Optional[Any] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
__UpperCAmelCase : int = "When is the coffee break?"
__UpperCAmelCase : Union[str, Any] = task_prompt.replace("{user_input}", snake_case__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__UpperCAmelCase : List[Any] = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__UpperCAmelCase : Union[str, Any] = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__UpperCAmelCase : int = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__UpperCAmelCase : Dict = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__UpperCAmelCase : Tuple = "hello world"
else:
raise ValueError("Model name not supported" )
__UpperCAmelCase : int = original_model.decoder.tokenizer(snake_case__, add_special_tokens=snake_case__, return_tensors="pt" )[
"input_ids"
]
__UpperCAmelCase : Any = original_model.encoder.model.patch_embed(snake_case__ )
__UpperCAmelCase , __UpperCAmelCase : int = model.encoder.embeddings(snake_case__ )
assert torch.allclose(snake_case__, snake_case__, atol=1e-3 )
# verify encoder hidden states
__UpperCAmelCase : int = original_model.encoder(snake_case__ )
__UpperCAmelCase : Union[str, Any] = model.encoder(snake_case__ ).last_hidden_state
assert torch.allclose(snake_case__, snake_case__, atol=1e-2 )
# verify decoder hidden states
__UpperCAmelCase : List[Any] = original_model(snake_case__, snake_case__, snake_case__ ).logits
__UpperCAmelCase : List[Any] = model(snake_case__, decoder_input_ids=snake_case__ ).logits
assert torch.allclose(snake_case__, snake_case__, atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1], commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1], commit_message="Update model" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
_snake_case = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
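# A minimal inference sketch matching the integration test above, using the same
# public checkpoint it loads:
#
#   from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, 1000)
#   label = model.config.id2label[logits.argmax(-1).item()]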
| 342
| 1
|
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCamelCase__: Optional[int] = open # noqa: we just need to have a builtin inside this module to test it properly
| 23
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_UpperCAmelCase : List[Any] = get_logger(__name__)
_UpperCAmelCase : Tuple = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , A_ , **A_ ) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
UpperCamelCase = inspect.signature(processor.__call__ ).parameters
if len(A_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
UpperCamelCase = processor(A_ , A_ , A_ , **A_ )
else:
UpperCamelCase = processor(A_ , A_ , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCamelCase = temperature
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores / self.temperature
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCamelCase = top_p
UpperCamelCase = filter_value
UpperCamelCase = min_tokens_to_keep
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , scores.shape[-1] )
UpperCamelCase = jnp.full_like(A_ , self.filter_value )
UpperCamelCase = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 )
UpperCamelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCamelCase = jnp.roll(A_ , 1 )
score_mask |= score_mask.at[:, 0].set(A_ )
# min tokens to keep
UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A_ )
UpperCamelCase = jnp.where(A_ , A_ , A_ )
UpperCamelCase = jax.lax.sort_key_val(A_ , A_ )[-1]
return next_scores
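# How the top-p mask above behaves on a toy row (scores already sorted descending
# by lax.top_k): with softmax probs [0.5, 0.3, 0.15, 0.05] and top_p = 0.8 the
# cumulative sums are [0.5, 0.8, 0.95, 1.0], so `cumulative_probs < top_p` keeps
# only the first entry; the jnp.roll(..., 1) plus .at[:, 0].set(True) step shifts
# the mask so the token that crosses the threshold survives too, leaving
# [0.5, 0.3] and setting the rest to `filter_value` before un-sorting back to
# vocabulary order with sort_key_val.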
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[str]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCamelCase = max(A_ , A_ )
UpperCamelCase = filter_value
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = scores.shape
UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCamelCase = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , A_ )
UpperCamelCase = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCamelCase = topk_scores.flatten()
UpperCamelCase = topk_indices.flatten() + shift
UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(A_ )
UpperCamelCase = next_scores_flat.reshape(A_ , A_ )
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = bos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = max_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(A_ , A_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCamelCase = min_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
# create boolean flag to decide if min length penalty should be applied
UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = list(A_ )
UpperCamelCase = begin_index
def __call__( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = dict(A_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCamelCase = force_token_array.at[index].set(A_ )
UpperCamelCase = jnp.intaa(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
def _force_token(A_ ):
UpperCamelCase = scores.shape[0]
UpperCamelCase = self.force_token_array[generation_idx]
UpperCamelCase = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('inf' )
UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCamelCase = lax.dynamic_update_slice(A_ , A_ , (0, current_token) )
return new_scores
UpperCamelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = generate_config.eos_token_id
UpperCamelCase = generate_config.no_timestamps_token_id
UpperCamelCase = generate_config.no_timestamps_token_id + 1
UpperCamelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A_ , 'max_initial_timestamp_index' ):
UpperCamelCase = generate_config.max_initial_timestamp_index
else:
UpperCamelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCamelCase = model_config.vocab_size
def __call__( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(A_ , A_ ):
UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
return jnp.where(
A_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
UpperCamelCase = jnp.where(cur_len == self.begin_index , A_ , A_ )
UpperCamelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
UpperCamelCase = jnp.where(
A_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , A_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCamelCase = jax.nn.log_softmax(A_ , axis=-1 )
def handle_cumulative_probs(A_ , A_ ):
UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
return scores
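
# Illustrative aside (an addition, not part of the original module): the
# processors above share one branch-free pattern -- turn a condition into a
# 0/1 flag and let jnp.where pick between the original and the masked scores,
# which keeps the code traceable under XLA. A minimal standalone sketch;
# `demo_min_length_mask` and all names below are hypothetical.
if __name__ == "__main__":
    import jax.numpy as jnp

    def demo_min_length_mask(scores, cur_len, min_length, eos_token_id):
        # Mask the EOS logit with -inf until at least `min_length` tokens exist.
        apply_penalty = 1 - jnp.clip(cur_len - min_length, 0, 1)
        return jnp.where(apply_penalty, scores.at[:, eos_token_id].set(-float("inf")), scores)

    masked = demo_min_length_mask(jnp.zeros((2, 10)), cur_len=3, min_length=5, eos_token_id=0)
    assert bool(jnp.isinf(masked[:, 0]).all())  # EOS is blocked while cur_len < min_length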
| 222
| 0
|
'''simple docstring'''
def decimal_to_fraction(decimal):
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: after the loop, `divisor` holds the GCD
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        # reduce the fraction by the GCD
        numerator, denominator = numerator // divisor, denominator // divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"{decimal_to_fraction(2) = }")
print(f"{decimal_to_fraction(89.0) = }")
print(f"{decimal_to_fraction('67') = }")
print(f"{decimal_to_fraction('45.0') = }")
print(f"{decimal_to_fraction(1.5) = }")
print(f"{decimal_to_fraction('6.25') = }")
print(f"{decimal_to_fraction('78td') = }")
| 37
|
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Return the arithmetic mean of the numbers in ``nums``.

    >>> mean([3, 6, 9])
    6.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
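    # Cross-check (an addition, not in the original): the standard library
    # agrees with the helper above.
    import statistics

    assert statistics.mean([3, 6, 9]) == mean([3, 6, 9]) == 6.0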
| 37
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
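
# Illustrative note (an addition, not part of the original file): with the
# defaults above, a 224x224 image split into 16x16 patches yields
# (224 // 16) ** 2 = 196 tokens, each embedded into hidden_size = 768 before
# the 12 transformer layers.
#     config = ViTMSNConfig()
#     (config.image_size // config.patch_size) ** 2  # -> 196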
| 44
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
__snake_case = range_bbox
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = None
if self.use_input_mask:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a (self : List[str] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a (self : List[Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = LiltModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ):
"""simple docstring"""
__snake_case = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a (self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Any = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[int] = False
A_ : List[Any] = False
def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
return True
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = LiltModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Tuple ):
"""simple docstring"""
__snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ )
__snake_case = torch.tensor([[1, 2]] , device=a__ )
__snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
__snake_case = model(input_ids=a__ , bbox=a__ )
__snake_case = torch.Size([1, 2, 768] )
__snake_case = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
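
# Illustrative aside (an addition, not part of the test file): the
# element-wise loop above that swaps bbox corners to keep boxes legal can be
# expressed with torch.sort, which enforces x0 <= x1 and y0 <= y1 in one
# vectorized pass (a sketch, not used by the tests):
#
#     xs, _ = torch.sort(bbox[..., [0, 2]], dim=-1)
#     ys, _ = torch.sort(bbox[..., [1, 3]], dim=-1)
#     bbox = torch.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], dim=-1)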
| 24
| 0
|
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Build sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """Projects sinusoidal timestep features to the model width with a 2-layer MLP."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps ``get_sinusoidal_embeddings`` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
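
# Quick shape check (an addition, not part of the original module): half of
# the output columns are sines, half cosines.
if __name__ == "__main__":
    demo_emb = get_sinusoidal_embeddings(jnp.array([0.0, 10.0, 100.0]), embedding_dim=32)
    assert demo_emb.shape == (3, 32)  # (batch, embedding_dim)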
| 363
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
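
# Example invocation (an addition; the script filename and paths are
# placeholders, not taken from the original):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers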
| 208
| 0
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
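    # Sanity checks (an addition): 13195 = 5 * 7 * 13 * 29, and 6857 is the
    # known answer for the Project Euler default above.
    assert solution(13195) == 29
    assert solution(600851475143) == 6857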
| 88
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize the prompts and yield each one n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom StoppingCriteria: stop once every generation contains an EOF string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True when all generated sequences contain an EOF string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the generation that contains an EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate batch_size completions per task and strip everything after the function body."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
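
# Note (an addition): the "code_eval" metric reports pass@k via the unbiased
# estimator 1 - C(n - c, k) / C(n, k) averaged over tasks, where n candidates
# are sampled per task and c of them pass the unit tests.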
| 88
| 1
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = AlbertTokenizer
__UpperCAmelCase : Any = AlbertTokenizerFast
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = True
__UpperCAmelCase : Union[str, Any] = True
def _lowercase ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = AlbertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Dict ):
__lowercase = "this is a test"
__lowercase = "this is a test"
return input_text, output_text
def _lowercase ( self : str ):
__lowercase = "<pad>"
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ), UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ), UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<pad>" )
self.assertEqual(vocab_keys[1], "<unk>" )
self.assertEqual(vocab_keys[-1], "▁eloquent" )
self.assertEqual(len(UpperCAmelCase__ ), 3_0_0_0_0 )
def _lowercase ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size, 3_0_0_0_0 )
def _lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = "I was born in 92000, and this is falsé."
__lowercase = tokenizer.tokenize(UpperCAmelCase__ )
__lowercase = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__ )
__lowercase = rust_tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(UpperCAmelCase__ )
__lowercase = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : Optional[int] ):
__lowercase = AlbertTokenizer(UpperCAmelCase__, keep_accents=UpperCAmelCase__ )
__lowercase = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase__, ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ), [4_8, 2_5, 2_1, 1_2_8_9] )
__lowercase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase__, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
__lowercase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
__lowercase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
def _lowercase ( self : List[str] ):
__lowercase = AlbertTokenizer(UpperCAmelCase__ )
__lowercase = tokenizer.encode("sequence builders" )
__lowercase = tokenizer.encode("multi-sequence build" )
__lowercase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
__lowercase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__, UpperCAmelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase ( self : Union[str, Any] ):
# fmt: off
__lowercase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
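
# Aside (an addition, not part of the test file): the leading "▁" in pieces
# such as "▁this" is the SentencePiece whitespace marker, which is why
# "This is a test" round-trips to ["▁this", "▁is", "▁a", "▁test"] after
# Albert's default lowercasing.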
| 144
|
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between predicted and actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: theta_0 + sum_i theta_i * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
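
# Worked update rule (an addition): each iteration applies
#     theta_i <- theta_i - LEARNING_RATE * dJ/dtheta_i
# with dJ/dtheta_0 = (1/m) * sum(error) for the bias term (index -1 above)
# and dJ/dtheta_i = (1/m) * sum(error * x_i) for the feature weights.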
| 144
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    r"""Processor wrapping a BlipImageProcessor and an AutoTokenizer into one object."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
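
# Illustrative usage (an addition; the checkpoint id is an assumption, not
# taken from this file):
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> pixel_values plus input_ids / attention_mask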
| 53
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's console logging
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 53
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
_a = 5_00_03
_a = 5_00_02
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = PLBartTokenizer
__UpperCAmelCase : Any = None
__UpperCAmelCase : List[Any] = False
def _lowercase ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = PLBartTokenizer(__A, language_codes="base", keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : int ):
__lowercase = PLBartTokenizer(__A, language_codes="base", keep_accents=__A )
__lowercase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__A, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
__lowercase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
__lowercase = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
], )
__lowercase = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
__lowercase = tokenizer.vocab_size
__lowercase = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 4, __A )]
self.assertListEqual(__A, ["__java__", "__python__", "__en_XX__", "<mask>"] )
__lowercase = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
__lowercase = tokenizer(__A ).input_ids
self.assertEqual(
tokenizer.decode(__A, skip_special_tokens=__A, clean_up_tokenization_spaces=__A ), __A, )
def _lowercase ( self : Any ):
__lowercase = PLBartTokenizer(__A, language_codes="multi", keep_accents=__A )
__lowercase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__A, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
__lowercase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
__lowercase = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
], )
__lowercase = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
__lowercase = tokenizer.vocab_size
__lowercase = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 7, __A )]
self.assertListEqual(
__A, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
__lowercase = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
__lowercase = tokenizer(__A ).input_ids
self.assertEqual(
tokenizer.decode(__A, skip_special_tokens=__A, clean_up_tokenization_spaces=__A ), __A, )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "uclanlp/plbart-python-en_XX"
__UpperCAmelCase : Any = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
__UpperCAmelCase : Any = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
__UpperCAmelCase : Any = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def _lowercase ( cls : int ):
__lowercase = PLBartTokenizer.from_pretrained(
cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX" )
__lowercase = 1
return cls
def _lowercase ( self : Optional[int] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 5_0_0_0_3 )
def _lowercase ( self : Union[str, Any] ):
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, __A )
def _lowercase ( self : Tuple ):
self.assertIn(__A, self.tokenizer.all_special_ids )
__lowercase = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
__lowercase = self.tokenizer.decode(__A, skip_special_tokens=__A )
__lowercase = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=__A )
self.assertEqual(__A, __A )
self.assertNotIn(self.tokenizer.eos_token, __A )
def _lowercase ( self : Tuple ):
__lowercase = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 2_0]
self.assertIsInstance(src_text[0], __A )
__lowercase = 1_0
__lowercase = self.tokenizer(__A, max_length=__A, truncation=__A ).input_ids[0]
self.assertEqual(ids[-2], 2 )
self.assertEqual(ids[-1], __A )
self.assertEqual(len(__A ), __A )
def _lowercase ( self : Union[str, Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ), [5_0_0_0_4, 5_0_0_0_1] )
def _lowercase ( self : Tuple ):
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__A )
__lowercase = PLBartTokenizer.from_pretrained(__A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, __A )
@require_torch
def _lowercase ( self : Dict ):
__lowercase = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=__A, return_tensors="pt" )
__lowercase = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0], __A )
self.assertEqual(batch.decoder_input_ids[1][-1], 2 )
self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE] )
@require_torch
def _lowercase ( self : Any ):
__lowercase = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=__A, truncation=__A, max_length=len(self.expected_src_tokens ), return_tensors="pt", )
__lowercase = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id )
self.assertIsInstance(__A, __A )
self.assertEqual((2, 2_6), batch.input_ids.shape )
self.assertEqual((2, 2_6), batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, __A )
self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _lowercase ( self : Any ):
__lowercase = self.tokenizer(self.src_text, padding=__A, truncation=__A, max_length=3, return_tensors="pt" )
__lowercase = self.tokenizer(
text_target=self.tgt_text, padding=__A, truncation=__A, max_length=1_0, return_tensors="pt" )
__lowercase = targets["""input_ids"""]
__lowercase = shift_tokens_right(__A, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 1_0 )
@require_torch
def _lowercase ( self : Tuple ):
__lowercase = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java" )
self.assertEqual(
nested_simplify(__A ), {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
}, )
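
# Aside (an addition): the checks above encode PLBart's convention of
# suffixing sequences with [eos, language code] -- hence expected_src_tokens
# ends in [2, PYTHON_CODE] and the target labels end in [2, EN_CODE].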
| 351
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
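
# Why this works (an addition): e is chosen with gcd(e, (p-1)*(q-1)) == 1, so
# the modular inverse d exists and e*d == 1 (mod (p-1)*(q-1)); by Euler's
# theorem (m**e)**d == m (mod n), which is what makes encryption invertible.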
| 144
| 0
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = LongformerTokenizer
_a = True
_a = LongformerTokenizerFast
_a = True
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ :Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCamelCase__ :Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase__ :Any = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = '''lower newer'''
UpperCamelCase__ :List[str] = '''lower newer'''
return input_text, output_text
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ :List[str] = '''lower newer'''
UpperCamelCase__ :Tuple = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase__ :str = tokenizer.tokenize(UpperCamelCase_ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = tokens + [tokenizer.unk_token]
UpperCamelCase__ :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase_ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase_ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
UpperCamelCase__ :Any = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
UpperCamelCase__ :Tuple = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        sequence = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase__ :str = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :int = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
                text = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCamelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
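# Standalone sketch (not part of the test class above) of the behavior these offset
# assertions pin down; the model name "roberta-base" is an assumption, chosen because it
# uses the same byte-level BPE as the tokenizers under test.
#
#   from transformers import RobertaTokenizerFast
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
#   enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#   enc.offset_mapping  # [(0, 5), (6, 11)] -- trim_offsets=True drops the separating space
#                       # with trim_offsets=False the second span becomes (5, 11)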
| 97
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    '''simple docstring'''
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda __a : __a.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
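# A sketch (not part of the script) factoring the stale rule above into a pure predicate,
# so it can be unit-tested without hitting the GitHub API; `issue` only needs
# `updated_at`, `created_at` and `get_labels()`.
#
#   def is_stale(issue, now=None):
#       now = now or dt.utcnow()
#       return (
#           (now - issue.updated_at).days > 23
#           and (now - issue.created_at).days >= 30
#           and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
#       )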
| 97
| 1
|
def _a ( num: int ) -> str:
    '''simple docstring'''
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
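    # Quick sanity check (a sketch): the converter `_a` above should agree with Python's
    # built-in bin() on integers, including zero and negatives.
    for value in (0, 1, 40, -40, 255):
        assert _a(value ) == bin(value ), (value, _a(value ), bin(value ))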
| 191
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def _a ( split_dict: SplitDict ):
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def _b ( split_info: SplitInfo ):
    '''simple docstring'''
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 191
| 1
|
'''simple docstring'''
def hexagonal_numbers( length: int ) -> list[int]:
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
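    # Cross-check (a sketch): consecutive hexagonal numbers differ by 4n + 1, so the
    # closed form n * (2n - 1) can be verified against the running differences.
    nums = hexagonal_numbers(length=6)
    assert nums == [0, 1, 6, 15, 28, 45]
    assert all(b - a == 4 * n + 1 for n, (a, b) in enumerate(zip(nums, nums[1:])))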
| 42
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase_ = logging.getLogger(__name__)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=str , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=str , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=str , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=str , default='''data/dump''' , help='''The dump file prefix.''' )
    args = parser.parse_args()
    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token''']  # `[CLS]`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token''']  # `<s>`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''bos_token''']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['''eos_token''']  # `<|endoftext|>`
    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        data = fp.readlines()
    logger.info('''Start encoding''' )
    logger.info(F'''{len(data )} examples to process.''' )
    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = F'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info('''Finished binarization''' )
    logger.info(F'''{len(data )} examples processed.''' )
    dp_file = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(dp_file , '''wb''' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
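# Consuming the dump (a sketch; the concrete path below is an assumption that follows the
# script's own `{dump_file}.{tokenizer_name}.pickle` naming):
#
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#       sequences = pickle.load(handle)   # list of np.uint16 / np.int32 token-id arrays
#   print(len(sequences), sequences[0][:10])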
| 303
| 0
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['''last_hidden_state''']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 257
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
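# Example invocation (a sketch; the paths are placeholders and the script file name is an
# assumption):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path path/to/bigbird/ckpt \
#       --big_bird_config_file path/to/config.json \
#       --pytorch_dump_path path/to/output \
#       --is_trivia_qa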
| 257
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
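# Tiny numeric sketch (not part of the tests) of shift_tokens_right as used below: the
# decoder inputs are the target ids shifted one position right, with the decoder-start
# token inserted in front.
#
#   ids = np.array([[5, 6, 7, 2]])   # 2 = eos
#   shift_tokens_right(ids, 1, 2)    # -> [[2, 5, 6, 7]]  (pad_token_id=1, start token=2)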
class FlaxBlenderbotSmallModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase ):
"""simple docstring"""
    vocab_size = 99
    def _get_config_and_data( self ):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward( self ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
"""simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
    def test_use_cache_forward( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
| 107
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( _lowerCamelCase: torch.Tensor ) -> torch.Tensor:
    exp_x = torch.exp(_lowerCamelCase )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(_lowerCamelCase * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
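# Sanity-check sketch (illustrative, not part of the module): the closed form above equals
# the Shannon entropy of softmax(x), which is what DeeBERT thresholds against for early exits.
#
#   x = torch.randn(4, 10)
#   p = torch.softmax(x, dim=1)
#   assert torch.allclose(entropy(x), -(p * p.log()).sum(dim=1), atol=1e-5)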
class DeeBertEncoder( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]

    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()

    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )

    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings

    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value

    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    def __init__( self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )

    def forward( self , encoder_outputs ):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
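# Usage sketch (assumptions: a compatible fine-tuned checkpoint path and prepared
# `input_ids`): one scalar threshold makes every layer eligible for early exit at
# inference time; confident (low-entropy) layers exit via HighwayException, which the
# forward pass above catches internally.
#
#   model = DeeBertForSequenceClassification.from_pretrained("path/to/deebert-checkpoint")
#   model.bert.encoder.set_early_exit_entropy(0.5)
#   model.eval()
#   logits = model(input_ids)[0]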
| 44
| 0
|
from manim import *
class lowerCamelCase_ ( Scene ):
'''simple docstring'''
    def construct( self ):
SCREAMING_SNAKE_CASE_ = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE_ = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = Text('CPU' , font_size=24 )
SCREAMING_SNAKE_CASE_ = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = Text('GPU' , font_size=24 )
SCREAMING_SNAKE_CASE_ = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = Text('Model' , font_size=24 )
SCREAMING_SNAKE_CASE_ = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for i, rect in enumerate(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = fill.copy().set_fill(_lowerCAmelCase , opacity=0.8 )
target.move_to(_lowerCAmelCase )
model_arr.append(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ = Text('Disk' , font_size=24 )
SCREAMING_SNAKE_CASE_ = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE_ = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = Square(0.3 )
input.set_fill(_lowerCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowerCAmelCase , buff=0.5 )
self.play(Write(_lowerCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowerCAmelCase , buff=0.02 )
self.play(MoveToTarget(_lowerCAmelCase ) )
self.play(FadeOut(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = Arrow(start=_lowerCAmelCase , end=_lowerCAmelCase , color=_lowerCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowerCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE_ = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) )
SCREAMING_SNAKE_CASE_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_lowerCAmelCase ) , Circumscribe(model_arr[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowerCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE_ = AnimationGroup(
FadeOut(_lowerCAmelCase , run_time=0.5 ) , MoveToTarget(_lowerCAmelCase , run_time=0.5 ) , FadeIn(_lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowerCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE_ = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE_ = a_c
SCREAMING_SNAKE_CASE_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowerCAmelCase ) , FadeOut(_lowerCAmelCase , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE_ = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) , MoveToTarget(_lowerCAmelCase ) )
self.wait()
| 355
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def UpperCAmelCase_ ( method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('0.17.0' ):
        return method

    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
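# Usage sketch (illustrative): applied as a method decorator so that, on accelerate
# >= 0.17, an offloading `_hf_hook.pre_forward` hook attached to the module runs before
# the decorated method.
#
#   class MyModel(torch.nn.Module):
#       @UpperCAmelCase_  # i.e. the wrapper defined above
#       def encode(self, x):
#           return x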
| 210
| 0
|
from typing import Dict, Optional
import numpy as np
import datasets
lowercase : List[str] = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowercase : Tuple = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowercase : List[Any] = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union( pred_label , label , num_labels , ignore_index , label_map=None , reduce_labels=False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map=None , reduce_labels=False , ):
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num=None , label_map=None , reduce_labels=False , ):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) ,reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case = None ,snake_case = None ,snake_case = False ,):
'''simple docstring'''
lowercase : Union[str, Any] = mean_iou(
results=snake_case ,gt_seg_maps=snake_case ,num_labels=snake_case ,ignore_index=snake_case ,nan_to_num=snake_case ,label_map=snake_case ,reduce_labels=snake_case ,)
return iou_result
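# --- Editor's note: the metric above was emitted with its assignment targets
# obfuscated, so this is a minimal, de-obfuscated sketch of the per-image
# intersect/union bookkeeping it performs. Identifiers here are my own
# reconstruction, not the original ones.
import numpy as np

def intersect_and_union_sketch(pred_label, label, num_labels, ignore_index):
    pred_label, label = np.asarray(pred_label), np.asarray(label)
    mask = label != ignore_index          # drop pixels whose ground truth is ignored
    pred_label, label = pred_label[mask], label[mask]
    intersect = pred_label[pred_label == label]
    hist = lambda x: np.histogram(x, bins=num_labels, range=(0, num_labels - 1))[0]
    area_intersect, area_pred, area_label = hist(intersect), hist(pred_label), hist(label)
    return area_intersect, area_pred + area_label - area_intersect, area_pred, area_label

# tiny smoke test: a perfect prediction yields IoU == 1 for every present class
pred = np.array([[0, 1], [1, 1]])
inter, union, _, _ = intersect_and_union_sketch(pred, pred.copy(), num_labels=2, ignore_index=255)
assert np.allclose(inter[union > 0] / union[union > 0], 1.0)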
| 20
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
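# --- Editor's note: a minimal sketch of the optional-dependency guard used
# above. `torch_is_installed` and `DummyPipeline` are illustrative stand-ins,
# not diffusers APIs.
import importlib.util

def torch_is_installed() -> bool:
    return importlib.util.find_spec('''torch''') is not None

class DummyPipeline:
    '''Placeholder export that fails loudly only when actually instantiated.'''
    def __init__(self, *args, **kwargs):
        raise ImportError('''This pipeline requires `torch`; install it to use this class.''')

# the pattern: keep the name importable either way, defer the error to use time
# VersatileDiffusionPipeline = RealPipeline if torch_is_installed() else DummyPipeline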
| 53
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ : str = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Any = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
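# --- Editor's note: a rough sketch of what `_LazyModule` does with the
# `_import_structure` mapping above -- submodules are imported only when one
# of their exported names is first accessed. Illustration only, not the
# transformers implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        sub = self._attr_to_module[attr]  # the real code raises AttributeError on misses
        module = importlib.import_module(f'''{self.__name__}.{sub}''')
        return getattr(module, attr)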
| 170
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase_ : int = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 170
| 1
|
'''simple docstring'''
_lowerCAmelCase = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
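# --- Editor's note: the availability helpers imported at the top of this file
# (is_torch_available, is_transformers_version, ...) boil down to two checks;
# here is a generic, hypothetical version of the version gate, not the
# diffusers implementation.
import importlib.metadata
from packaging import version

def installed_version_at_least(package: str, minimum: str) -> bool:
    try:
        installed = importlib.metadata.version(package)
    except importlib.metadata.PackageNotFoundError:
        return False
    return version.parse(installed) >= version.parse(minimum)

# e.g. the versatile-diffusion guard earlier amounts to:
# installed_version_at_least('''transformers''', '''4.25.0''')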
| 37
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : int = '''xlm-roberta-xl'''
def __init__( self ,__UpperCAmelCase=25_0880 ,__UpperCAmelCase=2560 ,__UpperCAmelCase=36 ,__UpperCAmelCase=32 ,__UpperCAmelCase=1_0240 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=514 ,__UpperCAmelCase=1 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-05 ,__UpperCAmelCase=1 ,__UpperCAmelCase=0 ,__UpperCAmelCase=2 ,__UpperCAmelCase="absolute" ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> str:
super().__init__(pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : str = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : List[str] = type_vocab_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Tuple = layer_norm_eps
lowerCAmelCase__ : int = position_embedding_type
lowerCAmelCase__ : Optional[Any] = use_cache
lowerCAmelCase__ : Optional[Any] = classifier_dropout
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase__ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase__ : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
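# --- Editor's note: sketch of how the `inputs` mapping above is consumed --
# the {axis_index: axis_name} dicts become the `dynamic_axes` argument of
# torch.onnx.export, marking batch and sequence dimensions as variable-sized.
# Names below are illustrative, not a transformers API.
from collections import OrderedDict

onnx_inputs = OrderedDict([
    ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
    ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
])
# torch.onnx.export(model, (input_ids, attention_mask), '''model.onnx''',
#                   input_names=list(onnx_inputs), dynamic_axes=dict(onnx_inputs))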
| 37
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 167
|
import pprint
import requests
API_ENDPOINT_URL = '''https://zenquotes.io/api'''
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
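# --- Editor's note: a defensive variant of the helpers above; the timeout and
# error handling are my additions, not part of the original script.
def random_quote_safe(timeout: float = 5.0):
    try:
        resp = requests.get(API_ENDPOINT_URL + '''/random''', timeout=timeout)
        resp.raise_for_status()
        return resp.json()
    except requests.RequestException as err:
        return [{'''q''': f'''request failed: {err}''', '''a''': '''local fallback'''}]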
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 167
| 1
|
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance( emb_1 , emb_2 , eps=1e-12 ):
    # normalize both sets of embeddings, then take their pairwise cosine similarity
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
class SCREAMING_SNAKE_CASE (nn.Module ):
lowerCAmelCase = 42
lowerCAmelCase = jnp.floataa
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = FlaxCLIPVisionModule(self.config.vision_config)
__A : int = nn.Dense(self.config.projection_dim , use_bias=_UpperCAmelCase , dtype=self.dtype)
__A : str = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim))
__A : Tuple = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim))
__A : Tuple = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,))
__A : List[Any] = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,))
def __call__( self , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = self.vision_model(_UpperCAmelCase)[1]
__A : str = self.visual_projection(_UpperCAmelCase)
__A : List[str] = jax_cosine_distance(_UpperCAmelCase , self.special_care_embeds)
__A : Optional[Any] = jax_cosine_distance(_UpperCAmelCase , self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__A : Union[str, Any] = 0.0
__A : int = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__A : Optional[Any] = jnp.round(_UpperCAmelCase , 3)
__A : Optional[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=_UpperCAmelCase)
# Use a lower threshold if an image has any special care concept
__A : List[Any] = is_special_care * 0.01
__A : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__A : List[Any] = jnp.round(_UpperCAmelCase , 3)
__A : int = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = CLIPConfig
lowerCAmelCase = '''clip_input'''
lowerCAmelCase = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = jnp.floataa , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
if input_shape is None:
__A : str = (1, 224, 224, 3)
__A : str = self.module_class(config=_UpperCAmelCase , dtype=_UpperCAmelCase , **_UpperCAmelCase)
super().__init__(_UpperCAmelCase , _UpperCAmelCase , input_shape=_UpperCAmelCase , seed=_UpperCAmelCase , dtype=_UpperCAmelCase , _do_init=_do_init)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : List[str] = jax.random.normal(_UpperCAmelCase , _UpperCAmelCase)
__A ,__A : int = jax.random.split(_UpperCAmelCase)
__A : Optional[int] = {'params': params_rng, 'dropout': dropout_rng}
__A : int = self.module.init(_UpperCAmelCase , _UpperCAmelCase)['params']
return random_params
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , ):
'''simple docstring'''
__A : str = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1))
return self.module.apply(
{'params': params or self.params} , jnp.array(_UpperCAmelCase , dtype=jnp.floataa) , rngs={} , )
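# --- Editor's note: a NumPy sketch of the thresholding the safety checker
# above performs in JAX: cosine similarity against learned concept embeddings,
# minus a per-concept threshold, flags an image if any score is positive.
# All tensors here are random stand-ins, not the real CLIP weights.
import numpy as np

def cosine_similarity_matrix(emb_1, emb_2, eps=1e-12):
    n1 = emb_1 / np.clip(np.linalg.norm(emb_1, axis=1, keepdims=True), eps, None)
    n2 = emb_2 / np.clip(np.linalg.norm(emb_2, axis=1, keepdims=True), eps, None)
    return n1 @ n2.T  # shape: (num_images, num_concepts)

rng = np.random.default_rng(0)
image_embeds = rng.normal(size=(2, 8))       # projected image features
concept_embeds = rng.normal(size=(17, 8))    # one row per filtered concept
thresholds = np.full(17, 0.8)                # illustrative per-concept cutoffs
scores = cosine_similarity_matrix(image_embeds, concept_embeds) - thresholds
has_flagged_concept = np.any(scores > 0, axis=1)   # one boolean per image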
| 190
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''llama'''
lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=3_2000 , _UpperCAmelCase=4096 , _UpperCAmelCase=1_1008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Union[str, Any] = vocab_size
__A : Union[str, Any] = max_position_embeddings
__A : Any = hidden_size
__A : Optional[Any] = intermediate_size
__A : str = num_hidden_layers
__A : Optional[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__A : List[Any] = num_attention_heads
__A : int = num_key_value_heads
__A : List[Any] = hidden_act
__A : Union[str, Any] = initializer_range
__A : List[Any] = rms_norm_eps
__A : Any = pretraining_tp
__A : Optional[Any] = use_cache
__A : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}')
__A : Optional[Any] = self.rope_scaling.get('type' , _UpperCAmelCase)
__A : Tuple = self.rope_scaling.get('factor' , _UpperCAmelCase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}')
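# --- Editor's note: the shapes `_rope_scaling_validation` above accepts and
# rejects, spelled out as plain data (the config class name is obfuscated, so
# no constructor call here).
valid_rope_scaling = {'type': 'linear', 'factor': 2.0}        # passes
invalid_rope_scalings = [
    {'type': 'linear'},                                        # missing `factor`
    {'type': 'ntk', 'factor': 2.0},                            # unknown `type`
    {'type': 'dynamic', 'factor': 1.0},                        # `factor` must be > 1.0
    {'type': 'dynamic', 'factor': 2},                          # `factor` must be a float
]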
| 190
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Tuple = 'pegasus'
A : int = ['past_key_values']
A : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE__=50265 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=4096 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=4096 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , **SCREAMING_SNAKE_CASE__ , ):
lowercase : List[Any] = vocab_size
lowercase : List[Any] = max_position_embeddings
lowercase : Dict = d_model
lowercase : Optional[Any] = encoder_ffn_dim
lowercase : int = encoder_layers
lowercase : str = encoder_attention_heads
lowercase : Tuple = decoder_ffn_dim
lowercase : List[str] = decoder_layers
lowercase : List[Any] = decoder_attention_heads
lowercase : Tuple = dropout
lowercase : int = attention_dropout
lowercase : Optional[Any] = activation_dropout
lowercase : Dict = activation_function
lowercase : Optional[Any] = init_std
lowercase : Tuple = encoder_layerdrop
lowercase : Optional[int] = decoder_layerdrop
lowercase : List[Any] = use_cache
lowercase : Any = encoder_layers
lowercase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , forced_eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
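# --- Editor's note: sketch of the aliasing that the `attribute_map` class
# attribute above provides -- this mimics PretrainedConfig's lookup, it is
# not the transformers code itself.
class ConfigWithAliases:
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self):
        self.encoder_attention_heads = 16
        self.d_model = 1024

    def __getattr__(self, name):  # invoked only when normal lookup fails
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = ConfigWithAliases()
assert cfg.num_attention_heads == 16 and cfg.hidden_size == 1024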
| 173
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__a = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__a = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.1_5},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
__a = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__a = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__a = '''allenai'''
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : Tuple = dict((re.sub(R'''@@$''', '''''', _UpperCamelCase ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''', '''</w>''', _UpperCamelCase ), v) for k, v in d.items() )
lowercase : List[str] = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
lowercase : Union[str, Any] = d[k] # restore
return da
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Any:
"""simple docstring"""
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
lowercase : Union[str, Any] = basename(_UpperCamelCase )
lowercase : List[str] = dirname(_UpperCamelCase )
lowercase : Optional[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase : List[str] = cls.hub_models()
lowercase : Tuple = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
lowercase : List[str] = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
lowercase : int = hub_utils.from_pretrained(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, archive_map=_UpperCamelCase, **_UpperCamelCase )
lowercase : int = vars(chkpt['''args''']['''model'''] )
lowercase : Union[str, Any] = args['''source_lang''']
lowercase : Dict = args['''target_lang''']
lowercase : Optional[int] = dirname(_UpperCamelCase )
lowercase : str = basename(_UpperCamelCase )
# dicts
lowercase : Optional[Any] = os.path.join(_UpperCamelCase, f"""dict.{src_lang}.txt""" )
lowercase : Any = os.path.join(_UpperCamelCase, f"""dict.{tgt_lang}.txt""" )
lowercase : Union[str, Any] = Dictionary.load(_UpperCamelCase )
lowercase : List[Any] = rewrite_dict_keys(src_dict.indices )
lowercase : List[str] = len(_UpperCamelCase )
lowercase : Tuple = os.path.join(_UpperCamelCase, '''vocab-src.json''' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(_UpperCamelCase, ensure_ascii=_UpperCamelCase, indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase : str = True
for k in src_vocab.keys():
if not k.islower():
lowercase : Dict = False
break
lowercase : Union[str, Any] = Dictionary.load(_UpperCamelCase )
lowercase : Any = rewrite_dict_keys(tgt_dict.indices )
lowercase : Tuple = len(_UpperCamelCase )
lowercase : Dict = os.path.join(_UpperCamelCase, '''vocab-tgt.json''' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(_UpperCamelCase, ensure_ascii=_UpperCamelCase, indent=_UpperCamelCase ) )
# merges_file (bpecodes)
lowercase : Optional[int] = os.path.join(_UpperCamelCase, VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase : str = os.path.join(_UpperCamelCase, _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase, encoding='''utf-8''' ) as fin:
lowercase : List[str] = fin.read()
lowercase : Tuple = re.sub(R''' \d+$''', '''''', _UpperCamelCase, 0, re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as fout:
fout.write(_UpperCamelCase )
# model config
lowercase : Dict = os.path.join(_UpperCamelCase, '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
lowercase : Optional[int] = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.0_2,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
lowercase : Dict = 5
lowercase : List[str] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowercase : int = best_score_hparams[model_dir]['''length_penalty''']
else:
lowercase : Any = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(_UpperCamelCase, ensure_ascii=_UpperCamelCase, indent=_UpperCamelCase ) )
# tokenizer config
lowercase : Any = os.path.join(_UpperCamelCase, _UpperCamelCase )
lowercase : Tuple = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 1024,
'''do_lower_case''': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(_UpperCamelCase, ensure_ascii=_UpperCamelCase, indent=_UpperCamelCase ) )
# model
lowercase : int = chkpt['''models'''][0]
lowercase : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowercase : Union[str, Any] = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowercase : int = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase, _UpperCamelCase )
lowercase : str = FSMTConfig.from_pretrained(_UpperCamelCase )
lowercase : str = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase, strict=_UpperCamelCase )
# save
lowercase : List[Any] = os.path.join(_UpperCamelCase, _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase, _UpperCamelCase )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
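# --- Editor's note: worked example of the vocab-key rewrite done by the first
# helper above (its def name is obfuscated, so the transform is restated):
# fairseq marks word continuations with a trailing "@@", FSMT marks word ends
# with "</w>", and special tokens keep their raw form.
fairseq_vocab = {'''<s>''': 0, '''new@@''': 4, '''york''': 5}
rewritten = {(k[:-2] if k.endswith('''@@''') else k + '''</w>'''): v for k, v in fairseq_vocab.items()}
rewritten['''<s>'''] = rewritten.pop('''<s></w>''')   # restore the special token
assert rewritten == {'''new''': 4, '''york</w>''': 5, '''<s>''': 0}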
| 173
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : List[str] = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
A__ : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase__ :
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : List[str]=16 , snake_case__ : Tuple=13 , snake_case__ : Dict=7 , snake_case__ : List[Any]=14 , snake_case__ : List[Any]=10 , snake_case__ : Dict=19 , snake_case__ : List[str]=5 , snake_case__ : Union[str, Any]=4 , snake_case__ : str=True , snake_case__ : int=16 , snake_case__ : Union[str, Any]=2 , snake_case__ : Tuple=4 , snake_case__ : Dict=4 , snake_case__ : int="gelu" , snake_case__ : Dict=0.1 , snake_case__ : str=0.1 , snake_case__ : List[str]=[1, 2, 3, 4, 5] , snake_case__ : Optional[int]=25 , snake_case__ : Dict=5 , ):
lowerCamelCase_ : Dict =d_model
lowerCamelCase_ : int =parent
lowerCamelCase_ : Optional[Any] =batch_size
lowerCamelCase_ : int =prediction_length
lowerCamelCase_ : Optional[int] =context_length
lowerCamelCase_ : Any =cardinality
lowerCamelCase_ : List[str] =num_time_features
lowerCamelCase_ : List[Any] =lags_sequence
lowerCamelCase_ : Optional[int] =embedding_dimension
lowerCamelCase_ : Union[str, Any] =is_training
lowerCamelCase_ : Union[str, Any] =hidden_size
lowerCamelCase_ : str =num_hidden_layers
lowerCamelCase_ : Any =num_attention_heads
lowerCamelCase_ : Any =intermediate_size
lowerCamelCase_ : Union[str, Any] =hidden_act
lowerCamelCase_ : Optional[int] =hidden_dropout_prob
lowerCamelCase_ : Optional[int] =attention_probs_dropout_prob
lowerCamelCase_ : List[Any] =context_length
lowerCamelCase_ : str =prediction_length + label_length
lowerCamelCase_ : int =label_length
lowerCamelCase_ : Union[str, Any] =moving_average
lowerCamelCase_ : str =autocorrelation_factor
def UpperCAmelCase__ ( self : Any ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : List[Any] ):
lowerCamelCase_ : Optional[Any] =config.context_length + max(config.lags_sequence )
lowerCamelCase_ : Any =ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCamelCase_ : List[Any] =floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCamelCase_ : List[str] =floats_tensor([self.batch_size, _past_length] )
lowerCamelCase_ : Any =floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCamelCase_ : Tuple =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCamelCase_ : Optional[Any] =floats_tensor([self.batch_size, config.prediction_length] )
lowerCamelCase_ : Any ={
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : str =self.get_config()
lowerCamelCase_ : List[Any] =self.prepare_autoformer_inputs_dict(snake_case__ )
return config, inputs_dict
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ : List[str] =self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : str =AutoformerModel(config=snake_case__ ).to(snake_case__ ).eval()
lowerCamelCase_ : int =model(**snake_case__ )
lowerCamelCase_ : str =outputs.encoder_last_hidden_state
lowerCamelCase_ : Optional[Any] =outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : Tuple =model.get_encoder()
encoder.save_pretrained(snake_case__ )
lowerCamelCase_ : Any =AutoformerEncoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =model.create_network_inputs(**snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase_ : Dict =torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase_ : int =encoder(inputs_embeds=snake_case__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowerCamelCase_ : str =(
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase_ : Optional[int] =torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase_ : Any =torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase_ : Optional[Any] =torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : List[str] =model.get_decoder()
decoder.save_pretrained(snake_case__ )
lowerCamelCase_ : str =AutoformerDecoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowerCamelCase_ : List[str] =decoder(
trend=snake_case__ , inputs_embeds=snake_case__ , encoder_hidden_states=snake_case__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Optional[int] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCAmelCase :Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCAmelCase :Optional[int] = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :int = False
_UpperCAmelCase :int = False
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Dict = False
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : List[str] =AutoformerModelTester(self )
lowerCamelCase_ : List[str] =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def UpperCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ , lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase_ : List[Any] =model_class(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : str =model_class.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertEqual(info["missing_keys"] , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Any =inspect.signature(getattr(snake_case__ , "forward" ) )
# The main input is the name of the argument after `self`
lowerCamelCase_ : Optional[Any] =list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ , lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] =model_class(snake_case__ )
lowerCamelCase_ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase_ : List[Any] =[
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : Optional[int] =True
lowerCamelCase_ : List[str] =getattr(self.model_tester , "seq_length" , snake_case__ )
lowerCamelCase_ : Dict =getattr(self.model_tester , "decoder_seq_length" , snake_case__ )
lowerCamelCase_ : List[Any] =getattr(self.model_tester , "encoder_seq_length" , snake_case__ )
lowerCamelCase_ : Optional[Any] =getattr(self.model_tester , "d_model" , snake_case__ )
lowerCamelCase_ : List[str] =getattr(self.model_tester , "num_attention_heads" , snake_case__ )
lowerCamelCase_ : Union[str, Any] =d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase_ : str =True
lowerCamelCase_ : int =False
lowerCamelCase_ : Any =True
lowerCamelCase_ : Tuple =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCamelCase_ : str =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ : List[Any] =True
lowerCamelCase_ : Optional[int] =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : List[str] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCamelCase_ : Optional[Any] =len(snake_case__ )
lowerCamelCase_ : List[Any] =7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case__ , snake_case__ )
# decoder attentions
lowerCamelCase_ : Union[str, Any] =outputs.decoder_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase_ : Tuple =outputs.cross_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase_ : Tuple =True
lowerCamelCase_ : Optional[int] =True
lowerCamelCase_ : Tuple =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Dict =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 2 , len(snake_case__ ) )
lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCAmelCase__ ( self : Optional[int] ):
super().test_retain_grad_hidden_states_attentions()
def _snake_case ( lowerCamelCase__ : Tuple="train-batch.pt" ) -> Any:
lowerCamelCase_ : Tuple =hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=lowerCamelCase__ , repo_type="dataset" )
lowerCamelCase_ : List[Any] =torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
return batch
@require_torch
@slow
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : int =AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : List[str] =prepare_batch()
with torch.no_grad():
lowerCamelCase_ : List[Any] =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
lowerCamelCase_ : Union[str, Any] =torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case__ )
lowerCamelCase_ : Dict =torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : str =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : Optional[int] =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
lowerCamelCase_ : List[Any] =torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case__ )
lowerCamelCase_ : Optional[Any] =torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : int =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : Dict =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
lowerCamelCase_ : Tuple =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case__ )
lowerCamelCase_ : List[str] =torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case__ )
lowerCamelCase_ : Any =outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case__ , rtol=1E-1 ) )
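# --- Editor's note: a NumPy sketch of the series decomposition the tests
# above rely on (`model.decomposition_layer`): the trend is a moving average
# and the seasonal part is the residual. Padding details are simplified
# relative to Autoformer's actual decomposition layer.
import numpy as np

def decompose(series: np.ndarray, kernel: int = 25):
    pad = (kernel - 1) // 2
    padded = np.concatenate([np.repeat(series[:1], pad), series, np.repeat(series[-1:], kernel - 1 - pad)])
    trend = np.convolve(padded, np.ones(kernel) / kernel, mode='''valid''')
    return series - trend, trend   # (seasonal, trend)

t = np.arange(200, dtype=np.float64)
series = 0.05 * t + np.sin(2 * np.pi * t / 12)   # linear trend + periodic component
seasonal, trend = decompose(series)
assert seasonal.shape == trend.shape == series.shape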
| 144
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :str = LayoutLMTokenizer
_UpperCAmelCase :int = LayoutLMTokenizerFast
_UpperCAmelCase :Union[str, Any] = True
_UpperCAmelCase :List[str] = True
def _snake_case ( self ):
super().setUp()
lowercase__: Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _snake_case ( self , **_UpperCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Dict = '''UNwant\u00E9d,running'''
lowercase__: Optional[Any] = '''unwanted, running'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Dict = self.tokenizer_class(self.vocab_file )
lowercase__: Optional[int] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def _snake_case ( self ):
pass
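# --- Illustrative sketch (editor's addition) ---
# The assertions above depend on greedy longest-match-first WordPiece over the
# toy vocab written in setUp(). A minimal standalone approximation of that
# matching rule (not the tokenizer's actual implementation):
def _wordpiece(word, vocab):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                tokens.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]  # no piece of any length matched
        start = end
    return tokens
assert _wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]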
| 2
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
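# --- Illustrative sketch (editor's addition) ---
# The ONNX config above declares which axes of `pixel_values` are dynamic.
# Standalone illustration of consuming such a mapping (the names below are this
# sketch's own, not part of the transformers API; OrderedDict is imported at
# the top of this file):
_dynamic_axes = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
for _name, _axes in _dynamic_axes.items():
    print(_name, "->", ", ".join(f"axis {i}: {label}" for i, label in _axes.items()))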
| 2
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
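# Worked examples of the mapping above (editor's addition; not exhaustive):
#   "emb.0.weight"                      -> "model.decoder.embed_tokens.0.weight"
#   "transformer.layers.0.norm1.weight" -> "model.decoder.layers.0.self_attn_layer_norm.weight"
#   "linears.0.weight"                  -> "lm_heads.0.weight"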
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
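# --- Standalone check of the fused-qkv split above (editor's addition) ---
# One (3*hidden, hidden) in_proj matrix is carved into equal q/k/v thirds;
# the toy hidden size below is made up:
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden, :], _fused[_hidden : 2 * _hidden, :], _fused[-_hidden:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _fused)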
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCamelCase__ : Optional[int] = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
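# Example invocation (editor's addition; the script filename and output path are placeholders):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu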
| 246
|
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
_lowercase_uppercase_re = re.compile(r'''([a-z\d])([A-Z])''')
_single_underscore_re = re.compile(r'''(?<!_)_(?!_)''')
_multiple_underscores_re = re.compile(r'''(_{2,})''')
_split_re = r'''^\w+(\.\w+)*$'''
INVALID_WINDOWS_CHARACTERS_IN_PATH = r'''<>:/\|?*'''
def camelcase_to_snakecase(name: str) -> str:
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name: str) -> str:
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None) -> str:
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
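# --- Worked examples (editor's addition; POSIX-style paths assumed) ---
assert camelcase_to_snakecase("CamelCase") == "camel_case"
assert snakecase_to_camelcase("camel_case") == "CamelCase"
assert filename_prefix_for_split("squad", "train") == "squad-train"
assert filenames_for_dataset_split(
    "/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
) == ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]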
| 246
| 1
|
import operator as op
UpperCAmelCase_ = 'scaler.pt'
UpperCAmelCase_ = 'pytorch_model'
UpperCAmelCase_ = 'random_states'
UpperCAmelCase_ = 'optimizer'
UpperCAmelCase_ = 'scheduler'
UpperCAmelCase_ = 'pytorch_model.bin'
UpperCAmelCase_ = 'pytorch_model.bin.index.json'
UpperCAmelCase_ = 'model.safetensors'
UpperCAmelCase_ = 'model.safetensors.index.json'
UpperCAmelCase_ = '1.10.2'
UpperCAmelCase_ = 'py38'
UpperCAmelCase_ = '4.17.0'
UpperCAmelCase_ = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
UpperCAmelCase_ = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
UpperCAmelCase_ = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
UpperCAmelCase_ = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
UpperCAmelCase_ = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
UpperCAmelCase_ = '2.0.1'
UpperCAmelCase_ = ['pdsh', 'standard', 'openmpi', 'mvapich']
UpperCAmelCase_ = ['default', 'reduce-overhead', 'max-autotune']
UpperCAmelCase_ = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCAmelCase_ = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
UpperCAmelCase_ = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
UpperCAmelCase_ = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
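# --- Illustrative sketch (editor's addition) ---
# The operator table defined above maps a textual comparison onto `operator`
# functions, which is how version gates are typically evaluated. Since every
# constant in this dump shares one obfuscated name, the table is restated here;
# `_compare_versions` is this sketch's own helper, not the library's API:
import operator as _op
from packaging import version as _version
_str_op = {'>': _op.gt, '>=': _op.ge, '==': _op.eq, '!=': _op.ne, '<=': _op.le, '<': _op.lt}
def _compare_versions(current, operation, reference):
    return _str_op[operation](_version.parse(current), _version.parse(reference))
assert _compare_versions('2.0.1', '>=', '1.10.2')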
| 369
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not
        # allow it). A standalone version of this subset check appears at the end of this file.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
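# --- Illustrative sketch (editor's addition) ---
# The subset rule exercised in test_check_illegal_input can be stated as a
# plain prefix test (a simplification of the library's trie-based check):
def _has_complete_subset(token_id_lists):
    seqs = [tuple(s) for s in token_id_lists]
    return any(a != b and b[: len(a)] == a for a in seqs for b in seqs)
assert _has_complete_subset([[1, 2], [1, 2, 3, 4]]) is True
assert _has_complete_subset([[1, 2, 3], [1, 2, 4]]) is False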
| 247
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
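# --- Note (editor's addition) ---
# With the _LazyModule pattern above, submodules are imported only on first
# attribute access, so `from transformers.models.graphormer import
# GraphormerConfig` stays cheap, while touching `GraphormerModel` is what
# actually triggers the torch-backed module import.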
| 6
|
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation as a string.
    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(35)
    '0b100011'
    >>> decimal_to_binary(-2)
    '-0b10'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
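# Cross-check against the built-in formatter (editor's addition):
#   decimal_to_binary(35) == bin(35) == "0b100011"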
| 267
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__A : Dict = logging.get_logger(__name__)
@add_end_docstrings(_A )
class __UpperCamelCase ( _A ):
def __init__(self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : int):
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
requires_backends(self , "vision")
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int=None):
A = {}
A = {}
if prompt is not None:
A = prompt
if generate_kwargs is not None:
A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one")
A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__(self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str=None):
A = load_image(__SCREAMING_SNAKE_CASE)
if prompt is not None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(
F"""Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE)} - but expected a single string. """
"Note also that one single text can be provided for conditional image to text generation.")
A = self.model.config.model_type
if model_type == "git":
A = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
A = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE).input_ids
A = [self.tokenizer.cls_token_id] + input_ids
A = torch.tensor(__SCREAMING_SNAKE_CASE).unsqueeze(0)
model_inputs.update({"input_ids": input_ids})
elif model_type == "pix2struct":
A = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
A = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
A = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
model_inputs.update(__SCREAMING_SNAKE_CASE)
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""")
else:
A = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
A = None
return model_inputs
def SCREAMING_SNAKE_CASE__ (self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , __SCREAMING_SNAKE_CASE)
and all(x is None for x in model_inputs["input_ids"])
):
A = None
if generate_kwargs is None:
A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
A = model_inputs.pop(self.model.main_input_name)
A = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
return model_outputs
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str):
A = []
for output_ids in model_outputs:
A = {
"generated_text": self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , )
}
records.append(__SCREAMING_SNAKE_CASE)
return records
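# --- Illustrative usage (editor's addition; the checkpoint name is an example) ---
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("photo.jpg")  # -> [{"generated_text": "..."}], one dict per image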
| 57
|
"""simple docstring"""
__A : Optional[Any] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
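# --- Note (editor's addition) ---
# A table like the one above is typically consumed by setup.py to pin install
# requirements, e.g. (a sketch; `deps` stands for the dict above, whose name
# here is obfuscated):
#   install_requires = [deps[pkg] for pkg in ("torch", "Pillow", "numpy")]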
| 57
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : torch.FloatTensor
class _A ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , _SCREAMING_SNAKE_CASE=(64,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE=True , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : Dict = layers_per_block
        SCREAMING_SNAKE_CASE_ : int = torch.nn.Conv2d(
_SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Any = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE_ : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : int = output_channel
SCREAMING_SNAKE_CASE_ : Any = block_out_channels[i]
SCREAMING_SNAKE_CASE_ : str = i == len(_SCREAMING_SNAKE_CASE ) - 1
SCREAMING_SNAKE_CASE_ : Dict = get_down_block(
_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
self.down_blocks.append(_SCREAMING_SNAKE_CASE )
# mid
        SCREAMING_SNAKE_CASE_ : Any = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
# out
SCREAMING_SNAKE_CASE_ : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_SCREAMING_SNAKE_CASE , eps=1e-6 )
SCREAMING_SNAKE_CASE_ : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE_ : Tuple = 2 * out_channels if double_z else out_channels
        SCREAMING_SNAKE_CASE_ : str = nn.Conv2d(block_out_channels[-1] , _SCREAMING_SNAKE_CASE , 3 , padding=1 )
SCREAMING_SNAKE_CASE_ : List[Any] = False
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = x
SCREAMING_SNAKE_CASE_ : int = self.conv_in(_SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_SCREAMING_SNAKE_CASE ):
def custom_forward(*_SCREAMING_SNAKE_CASE ):
return module(*_SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
# middle
SCREAMING_SNAKE_CASE_ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ : int = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# middle
SCREAMING_SNAKE_CASE_ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ : List[Any] = down_block(_SCREAMING_SNAKE_CASE )
# middle
SCREAMING_SNAKE_CASE_ : List[str] = self.mid_block(_SCREAMING_SNAKE_CASE )
# post-process
SCREAMING_SNAKE_CASE_ : List[Any] = self.conv_norm_out(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = self.conv_act(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = self.conv_out(_SCREAMING_SNAKE_CASE )
return sample
class _A ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , _SCREAMING_SNAKE_CASE=(64,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE="group" , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = layers_per_block
        SCREAMING_SNAKE_CASE_ : str = nn.Conv2d(
_SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.ModuleList([] )
SCREAMING_SNAKE_CASE_ : str = in_channels if norm_type == 'spatial' else None
# mid
        SCREAMING_SNAKE_CASE_ : int = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
# up
SCREAMING_SNAKE_CASE_ : List[Any] = list(reversed(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Tuple = output_channel
SCREAMING_SNAKE_CASE_ : List[Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = i == len(_SCREAMING_SNAKE_CASE ) - 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_up_block(
_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , resnet_time_scale_shift=_SCREAMING_SNAKE_CASE , )
self.up_blocks.append(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE_ : Tuple = SpatialNorm(block_out_channels[0] , _SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : str = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_SCREAMING_SNAKE_CASE , eps=1e-6 )
SCREAMING_SNAKE_CASE_ : int = nn.SiLU()
        SCREAMING_SNAKE_CASE_ : Dict = nn.Conv2d(block_out_channels[0] , _SCREAMING_SNAKE_CASE , 3 , padding=1 )
SCREAMING_SNAKE_CASE_ : List[Any] = False
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = z
SCREAMING_SNAKE_CASE_ : str = self.conv_in(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_SCREAMING_SNAKE_CASE ):
def custom_forward(*_SCREAMING_SNAKE_CASE ):
return module(*_SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
SCREAMING_SNAKE_CASE_ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = sample.to(_SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE_ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
else:
# middle
SCREAMING_SNAKE_CASE_ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = sample.to(_SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# middle
SCREAMING_SNAKE_CASE_ : int = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = sample.to(_SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE_ : str = up_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE_ : Dict = self.conv_norm_out(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = self.conv_norm_out(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = self.conv_act(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = self.conv_out(_SCREAMING_SNAKE_CASE )
return sample
class _A ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="random" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : int = n_e
SCREAMING_SNAKE_CASE_ : List[Any] = vq_embed_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = beta
SCREAMING_SNAKE_CASE_ : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE_ : List[str] = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE_ : List[str] = self.used.shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE_ : List[str] = self.re_embed
SCREAMING_SNAKE_CASE_ : Optional[int] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
SCREAMING_SNAKE_CASE_ : int = n_e
SCREAMING_SNAKE_CASE_ : Any = sane_index_shape
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = inds.shape
assert len(_SCREAMING_SNAKE_CASE ) > 1
SCREAMING_SNAKE_CASE_ : List[Any] = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.used.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE_ : List[str] = match.argmax(-1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE_ : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE_ : int = self.unknown_index
return new.reshape(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inds.shape
assert len(_SCREAMING_SNAKE_CASE ) > 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE_ : List[Any] = self.used.to(_SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _SCREAMING_SNAKE_CASE )
return back.reshape(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE_ : List[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE_ : Tuple = torch.argmin(torch.cdist(_SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE_ : List[Any] = self.embedding(_SCREAMING_SNAKE_CASE ).view(z.shape )
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE_ : Dict = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE_ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE_ : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE_ : Optional[int] = self.remap_to_used(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE_ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.remap is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.unmap_to_all(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE_ : int = self.embedding(_SCREAMING_SNAKE_CASE )
if shape is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = z_q.view(_SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE_ : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
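# --- Illustrative sketch (editor's addition) ---
# The quantizer above snaps each latent vector to its nearest codebook entry
# and uses the straight-through trick z + (z_q - z).detach() so gradients reach
# the encoder. Standalone nearest-neighbour lookup with toy sizes:
_codebook = torch.randn(16, 4)               # (n_e, vq_embed_dim)
_z = torch.randn(8, 4)                       # flattened latents
_idx = torch.cdist(_z, _codebook).argmin(dim=1)
_z_q = _z + (_codebook[_idx] - _z).detach()  # forward pass sees the codes, backward sees z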
class _A ( __magic_name__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = parameters
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = torch.chunk(_SCREAMING_SNAKE_CASE , 2 , dim=1 )
SCREAMING_SNAKE_CASE_ : int = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE_ : Dict = deterministic
SCREAMING_SNAKE_CASE_ : Dict = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE_ : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE_ : Dict = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = randn_tensor(
self.mean.shape , generator=_SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.mean + self.std * sample
return x
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=[1, 2, 3] ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.mean
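# --- Note (editor's addition) ---
# Closed forms implemented above: against a standard normal,
#   KL(N(mu, sigma^2) || N(0, I)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2)
# and nll() is the Gaussian negative log-likelihood
#   0.5 * sum(log(2*pi) + log sigma^2 + (x - mu)^2 / sigma^2).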
| 253
|
lowerCAmelCase : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 253
| 1
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 60
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __magic_name__ :
def __init__( self , _lowercase , _lowercase=2 , _lowercase=32 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=True , _lowercase=32 , _lowercase=4 , _lowercase=[0, 1, 2, 3] , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=3 , _lowercase=[1, 384, 24, 24] , _lowercase=True , _lowercase=None , )-> List[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = backbone_out_indices
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = backbone_featmap_shape
UpperCamelCase_ = scope
UpperCamelCase_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Any:
UpperCamelCase_ = DPTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForDepthEstimation(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> str:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase_ :Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ :Any = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ :List[Any] = False
UpperCamelCase_ :Dict = False
UpperCamelCase_ :Tuple = False
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = DPTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def UpperCAmelCase_ ( self )-> Any:
pass
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
if model_class in get_values(_lowercase ):
continue
UpperCamelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = False
UpperCamelCase_ = True
if model_class in get_values(_lowercase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=_lowercase )
# Skip the check for the backbone
UpperCamelCase_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase_ = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase_ ( self )-> int:
pass
@slow
def UpperCAmelCase_ ( self )-> List[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase_ = DPTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = "add"
with self.assertRaises(_lowercase ):
UpperCamelCase_ = DPTForDepthEstimation(_lowercase )
def lowerCAmelCase( )-> Any:
"""simple docstring"""
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
UpperCamelCase_ = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(_lowercase )
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_lowercase , return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_lowercase )
UpperCamelCase_ = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _lowercase )
UpperCamelCase_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _lowercase , atol=1e-4 ) )
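# --- Illustrative post-processing sketch (editor's addition) ---
# Predicted depth is usually resized back to the input resolution before use;
# the interpolation mode below is the common choice, not taken from this file:
#   import torch.nn.functional as F
#   depth = F.interpolate(predicted_depth.unsqueeze(1), size=(img_h, img_w),
#                         mode="bicubic", align_corners=False).squeeze(1)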
| 60
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , ) -> int:
UpperCamelCase : List[str] = {}
if train_file is not None:
UpperCamelCase : List[Any] = [train_file]
if eval_file is not None:
UpperCamelCase : Dict = [eval_file]
if test_file is not None:
UpperCamelCase : Any = [test_file]
UpperCamelCase : str = datasets.load_dataset("csv" , data_files=_lowerCAmelCase )
UpperCamelCase : Dict = list(ds[list(files.keys() )[0]].features.keys() )
UpperCamelCase : Union[str, Any] = features_name.pop(_lowerCAmelCase )
UpperCamelCase : str = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCamelCase : Any = {label: i for i, label in enumerate(_lowerCAmelCase )}
UpperCamelCase : Union[str, Any] = tokenizer.model_input_names
UpperCamelCase : Any = {}
if len(_lowerCAmelCase ) == 1:
for k in files.keys():
UpperCamelCase : Dict = ds[k].map(
lambda _lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" ) , batched=_lowerCAmelCase , )
elif len(_lowerCAmelCase ) == 2:
for k in files.keys():
UpperCamelCase : Any = ds[k].map(
lambda _lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , ) , batched=_lowerCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCamelCase : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCamelCase : str = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCamelCase : str = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase : List[str] = labelaid[ex[label_name]]
yield (d, label)
UpperCamelCase : Dict = (
tf.data.Dataset.from_generator(
_lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCamelCase : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCamelCase : Optional[int] = (
tf.data.Dataset.from_generator(
_lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCamelCase : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCamelCase : Tuple = (
tf.data.Dataset.from_generator(
_lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCamelCase : List[str] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
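# Minimal usage sketch for the helper above (invoked later in main() as
# get_tfds; the CSV file names below are hypothetical):
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv",
#       eval_file="dev.csv",
#       test_file="test.csv",
#       tokenizer=AutoTokenizer.from_pretrained("bert-base-uncased"),
#       label_column_id=0,
#       max_seq_length=128,
#   )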
__lowerCamelCase : Tuple = logging.getLogger(__name__)
@dataclass
class A__ :
_UpperCAmelCase :int = field(metadata={'help': 'Which column contains the label'} )
_UpperCAmelCase :str = field(default=__snake_case , metadata={'help': 'The path of the training file'} )
_UpperCAmelCase :Optional[str] = field(default=__snake_case , metadata={'help': 'The path of the development file'} )
_UpperCAmelCase :Optional[str] = field(default=__snake_case , metadata={'help': 'The path of the test file'} )
_UpperCAmelCase :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCAmelCase :bool = field(
default=__snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A__ :
_UpperCAmelCase :str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_UpperCAmelCase :Optional[str] = field(
default=__snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCAmelCase :Optional[str] = field(
default=__snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_UpperCAmelCase :bool = field(default=__snake_case , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase :Optional[str] = field(
default=__snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def A_ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCamelCase : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCAmelCase ) -> Dict:
UpperCamelCase : List[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCamelCase : List[Any] = TFTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase : Optional[Any] = trainer.evaluate()
UpperCamelCase : Optional[int] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCAmelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(_lowerCAmelCase )
return results
if __name__ == "__main__":
main()
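# Example invocation (script name and file paths are illustrative; column 0 is
# assumed to hold the label):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --output_dir ./text-clf --do_train --do_eval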
| 52
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364
|
"""simple docstring"""
import cmath
import math
def lowerCAmelCase__ ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> complex:
"""simple docstring"""
snake_case = math.radians(_UpperCamelCase )
snake_case = math.radians(_UpperCamelCase )
# Convert voltage and current to rectangular form
snake_case = cmath.rect(_UpperCamelCase , _UpperCamelCase )
snake_case = cmath.rect(_UpperCamelCase , _UpperCamelCase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
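# Worked example for the function above: with voltage 100 V at 0 degrees and
# current 5 A at 0 degrees, cmath.rect yields 100+0j and 5+0j, so the returned
# apparent power is (500+0j). Because the two phasors are multiplied directly,
# their phase angles add.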
| 149
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def lowerCamelCase__ ( _A , _A , **_A ):
'''simple docstring'''
snake_case_ = AutoConfig.from_pretrained(_A , **_A )
snake_case_ = AutoModelForSeqaSeqLM.from_config(_A )
model.save_pretrained(_A )
AutoTokenizer.from_pretrained(_A ).save_pretrained(_A )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
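# Example invocation via fire (config name and output directory are
# illustrative): python save_randomly_initialized_model.py t5-small ./t5-random
# This builds a config, instantiates an untrained model from it, and saves the
# model together with the matching tokenizer.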
| 187
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
lowercase__ : List[Any] = True
from torch.cuda.amp import autocast
lowercase__ : List[Any] = logging.getLogger(__name__)
def lowerCamelCase__ ( _A=None , _A=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_A )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
lowerCAmelCase_ = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
lowerCAmelCase_ = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
lowerCAmelCase_ = field(
default=0.1 , metadata={
'''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
lowerCAmelCase_ = field(
default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in the feature extractor.'''} , )
lowerCAmelCase_ = field(
default=0.05 , metadata={
'''help''': (
'''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
lowerCAmelCase_ = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase_ = field(
default='''train+validation''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''''
} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase_ = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = True
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def __call__( self : Optional[int] , __lowercase : List[Dict[str, Union[List[int], torch.Tensor]]] ):
"""simple docstring"""
snake_case_ = [{"input_values": feature["input_values"]} for feature in features]
snake_case_ = [{"input_ids": feature["labels"]} for feature in features]
snake_case_ = self.processor.pad(
__lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
snake_case_ = self.processor.pad(
labels=__lowercase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
# replace padding with -100 to ignore loss correctly
snake_case_ = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
snake_case_ = labels
return batch
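# Toy illustration of the -100 masking above: with padded label ids
# [[5, 7, 0, 0]] and attention_mask [[1, 1, 0, 0]], masked_fill yields
# [[5, 7, -100, -100]], so padded positions are ignored when computing the loss.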
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def snake_case__ ( self : str , __lowercase : nn.Module , __lowercase : Dict[str, Union[torch.Tensor, Any]] ):
"""simple docstring"""
model.train()
snake_case_ = self._prepare_inputs(__lowercase )
if self.use_amp:
with autocast():
snake_case_ = self.compute_loss(__lowercase , __lowercase )
else:
snake_case_ = self.compute_loss(__lowercase , __lowercase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
snake_case_ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
snake_case_ = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
if self.args.gradient_accumulation_steps > 1:
snake_case_ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__lowercase ).backward()
elif self.use_apex:
with amp.scale_loss(__lowercase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__lowercase )
else:
loss.backward()
return loss.detach()
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _A )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
snake_case_ = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
snake_case_ = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
snake_case_ = f"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(_A ):
snake_case_ = re.sub(_A , "" , batch["sentence"] ).lower() + " "
return batch
snake_case_ = train_dataset.map(_A , remove_columns=["sentence"] )
snake_case_ = eval_dataset.map(_A , remove_columns=["sentence"] )
def extract_all_chars(_A ):
snake_case_ = " ".join(batch["text"] )
snake_case_ = list(set(_A ) )
return {"vocab": [vocab], "all_text": [all_text]}
snake_case_ = train_dataset.map(
_A , batched=_A , batch_size=-1 , keep_in_memory=_A , remove_columns=train_dataset.column_names , )
snake_case_ = train_dataset.map(
_A , batched=_A , batch_size=-1 , keep_in_memory=_A , remove_columns=eval_dataset.column_names , )
snake_case_ = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
snake_case_ = {v: k for k, v in enumerate(_A )}
snake_case_ = vocab_dict[" "]
del vocab_dict[" "]
snake_case_ = len(_A )
snake_case_ = len(_A )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(_A , _A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=_A , return_attention_mask=_A )
snake_case_ = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
snake_case_ = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
snake_case_ = min(len(_A ) , data_args.max_train_samples )
snake_case_ = train_dataset.select(range(_A ) )
if data_args.max_val_samples is not None:
snake_case_ = eval_dataset.select(range(data_args.max_val_samples ) )
snake_case_ = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_A ):
snake_case_ , snake_case_ = torchaudio.load(batch["path"] )
snake_case_ = resampler(_A ).squeeze().numpy()
snake_case_ = 16000
snake_case_ = batch["text"]
return batch
snake_case_ = train_dataset.map(
_A , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
snake_case_ = eval_dataset.map(
_A , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_A ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
snake_case_ = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(_A )
return batch
snake_case_ = train_dataset.map(
_A , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_A , num_proc=data_args.preprocessing_num_workers , )
snake_case_ = eval_dataset.map(
_A , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_A , num_proc=data_args.preprocessing_num_workers , )
# Metric
snake_case_ = datasets.load_metric("wer" )
def compute_metrics(_A ):
snake_case_ = pred.predictions
snake_case_ = np.argmax(_A , axis=-1 )
snake_case_ = processor.tokenizer.pad_token_id
snake_case_ = processor.batch_decode(_A )
# we do not want to group tokens when computing the metrics
snake_case_ = processor.batch_decode(pred.label_ids , group_tokens=_A )
snake_case_ = wer_metric.compute(predictions=_A , references=_A )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
snake_case_ = DataCollatorCTCWithPadding(processor=_A , padding=_A )
# Initialize our Trainer
snake_case_ = CTCTrainer(
model=_A , data_collator=_A , args=_A , compute_metrics=_A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case_ = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
snake_case_ = model_args.model_name_or_path
else:
snake_case_ = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
snake_case_ = trainer.train(resume_from_checkpoint=_A )
trainer.save_model()
snake_case_ = train_result.metrics
snake_case_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_A )
)
snake_case_ = min(_A , len(_A ) )
trainer.log_metrics("train" , _A )
trainer.save_metrics("train" , _A )
trainer.save_state()
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case_ = trainer.evaluate()
snake_case_ = data_args.max_val_samples if data_args.max_val_samples is not None else len(_A )
snake_case_ = min(_A , len(_A ) )
trainer.log_metrics("eval" , _A )
trainer.save_metrics("eval" , _A )
return results
if __name__ == "__main__":
main()
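# The parser above accepts either CLI flags or a single JSON file (see the
# sys.argv check at the top of main()). A hypothetical args.json:
#   {"model_name_or_path": "facebook/wav2vec2-large-xlsr-53",
#    "dataset_config_name": "tr",
#    "output_dir": "./wav2vec2-xlsr",
#    "do_train": true, "do_eval": true}
# run as: python run_common_voice.py args.json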
| 187
| 1
|
def lowerCamelCase_ ( UpperCamelCase__ : int = 1000 ) -> int:
"""simple docstring"""
__lowerCamelCase = -1
__lowerCamelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__lowerCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
__lowerCamelCase = n - a - b
if c * c == (a * a + b * b):
__lowerCamelCase = a * b * c
if candidate >= product:
__lowerCamelCase = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
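# For the default n=1000 the maximising triplet is (200, 375, 425):
# 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000, so the intended
# result is 200 * 375 * 425 = 31875000.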
| 348
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
__A = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = []
__A = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
elif "running_mean" in name:
__lowerCamelCase = 'running_mean'
elif "running_var" in name:
__lowerCamelCase = 'running_var'
elif "num_batches_tracked" in name:
__lowerCamelCase = 'num_batches_tracked'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple:
"""simple docstring"""
if config_path is not None:
__lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = SpeechTaConfig()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
__lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ )
elif task == "t2s":
__lowerCamelCase = 1876
__lowerCamelCase = 600
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ )
elif task == "s2s":
__lowerCamelCase = 1876
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
__lowerCamelCase = SpeechTaFeatureExtractor()
__lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
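# Example conversion command (script name and local paths are illustrative):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf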
| 348
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : Dict = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 167
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase ( unittest.TestCase):
def __init__( self : int , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]=13 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : List[Any]=2_24 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : List[str]=4_00 , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : int=True , _lowerCamelCase : Any=[0.5, 0.5, 0.5] , _lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
A_ : int = size if size is not None else {'''height''': 18, '''width''': 18}
A_ : Optional[int] = parent
A_ : Any = batch_size
A_ : List[str] = num_channels
A_ : List[str] = image_size
A_ : List[Any] = min_resolution
A_ : str = max_resolution
A_ : Dict = do_resize
A_ : Dict = size
A_ : str = do_normalize
A_ : List[str] = image_mean
A_ : List[str] = image_std
def a_ ( self : Optional[Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( __UpperCAmelCase , unittest.TestCase):
__lowerCAmelCase : Optional[int] = ViTImageProcessor if is_vision_available() else None
def a_ ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = EfficientFormerImageProcessorTester(self )
@property
def a_ ( self : List[Any] ):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def a_ ( self : str ):
"""simple docstring"""
pass
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : str = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def a_ ( self : str ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : List[str] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 167
| 1
|
'''simple docstring'''
import os
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = os.path.dirname(os.path.realpath(snake_case ) )
snake_case_ = os.path.join(snake_case , "triangle.txt" )
with open(snake_case ) as f:
snake_case_ = f.readlines()
snake_case_ = []
for line in triangle:
snake_case_ = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(snake_case ) )
a.append(snake_case )
for i in range(1 , len(snake_case ) ):
for j in range(len(a[i] ) ):
snake_case_ = a[i - 1][j] if j != len(a[i - 1] ) else 0
snake_case_ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(snake_case , snake_case )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
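# Propagation sketch on a toy triangle (not the bundled triangle.txt):
#   3            3
#   7 4     ->   10 7
#   2 4 6        12 14 13
# Each entry adds the larger of its two upper neighbours, so the best
# top-to-bottom path sum is the max of the last row, here 14.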
| 92
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class _snake_case :
def __init__( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
snake_case_ = len(a__ ) - 1
def lowerCAmelCase__ ( self , a__ ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , a__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(a__ ) , 5 ) == 1
return output_values
def lowerCAmelCase__ ( self , a__ ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = self.basis_function(a__ )
snake_case_ = 0.0
snake_case_ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCAmelCase__ ( self , a__ = 0.0_1 ) -> Any:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
snake_case_ = [] # x coordinates of points to plot
snake_case_ = [] # y coordinates of points to plot
snake_case_ = 0.0
while t <= 1:
snake_case_ = self.bezier_curve_function(a__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
snake_case_ = [i[0] for i in self.list_of_points]
snake_case_ = [i[1] for i in self.list_of_points]
plt.plot(
a__ , a__ , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(a__ , a__ , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 92
| 1
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs :List[str] = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS :List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def _lowercase ( __lowerCAmelCase ) -> str:
if "://" in dataset_path:
SCREAMING_SNAKE_CASE__ : Dict = dataset_path.split("""://""" )[1]
return dataset_path
def _lowercase ( __lowerCAmelCase ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] = not is_remote_filesystem(__lowerCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__lowerCAmelCase ) , fs._strip_protocol(__lowerCAmelCase ) )
else:
fs.mv(__lowerCAmelCase , __lowerCAmelCase , recursive=__lowerCAmelCase )
def _lowercase ( ) -> None:
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = threading.Lock()
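# Behaviour sketch for the path helpers above:
#   "s3://bucket/data" -> "bucket/data"   (scheme stripped from a remote URI)
#   "/local/data"      -> "/local/data"   (local paths pass through unchanged)
# A filesystem whose protocol is anything other than "file" is treated as
# remote, in which case the rename helper uses fs.mv instead of shutil.move.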
| 132
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __a :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=2 , _a=24 , _a=16 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , _a=2 , _a=2 , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : str = max_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_mel_bins
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : List[str] = frequency_stride
SCREAMING_SNAKE_CASE__ : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE__ : Any = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE__ : Any = num_patches + 2
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_values, labels
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ASTModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE :Dict = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
_SCREAMING_SNAKE_CASE :Any = False
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
_SCREAMING_SNAKE_CASE :Tuple = False
def _a ( self , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ASTModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset")
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
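# A minimal sketch, not part of the original tests: AST checkpoints expect
# 16 kHz input, while `prepare_audio` returns the clip's native rate, so a
# resampling step like this is often needed first (helper name is hypothetical).
def _resample_to_16khz(audio, sampling_rate):
    if sampling_rate != 16_000:
        audio = torchaudio.functional.resample(audio, sampling_rate, 16_000)
    return audio, 16_000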
@require_torch
@require_torchaudio
class __a (unittest.TestCase):
'''simple docstring'''
    @cached_property
    def default_feature_extractor(self):
        """simple docstring"""
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        """simple docstring"""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
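    # Sketch (an addition, not part of the original test): turning the logits
    # above into readable AudioSet tags; AST on AudioSet is multi-label, so a
    # sigmoid rather than a softmax is the usual choice.
    #   probs = outputs.logits.sigmoid()
    #   top_ids = probs[0].topk(3).indices.tolist()
    #   labels = [model.config.id2label[i] for i in top_ids]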
| 132
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
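# Minimal usage sketch (illustrative; the checkpoint choice is an assumption,
# mirroring what the tests below exercise rather than prescribing a model):
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)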
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        """simple docstring"""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        """simple docstring"""
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # No text is detected in this image, so layoutlmv2 should fail and
        # probably return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        expected_output = [
            {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
            {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [expected_output] * 2)
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        expected_output = [
            {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
            {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [expected_output] * 2)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        expected_output = [
            {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
            {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [expected_output] * 2)
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        expected_output = [
            {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
            {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [expected_output] * 2)
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
@slow
@require_torch
    def test_large_model_pt_donut(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("""Document question answering not implemented in TF""")
    def test_small_model_tf(self):
        """simple docstring"""
        pass
| 351
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
    def _UpperCAmelCase ( self : int):
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
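# Usage sketch (illustrative): the method above hands back an independent deep
# copy of the config, so mutating the clone leaves the original untouched.
#   cfg = SCREAMING_SNAKE_CASE__()
#   clone = cfg._UpperCAmelCase()
#   assert clone is not cfg and clone.__dict__ == cfg.__dict__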
| 313
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
pass
| 2
|
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) maps to 0 (loss), 1 (tie) or 2 (win),
    # since SORTED_HANDS is ordered from weakest to strongest hand.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_flush(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_straight(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_five_high_straight(hand, expected, card_values):
    """simple docstring"""
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    """simple docstring"""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    """simple docstring"""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 2
| 1
|
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
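# Call sketch (illustrative): the memoized variant depends on the global table
# `f` being pre-seeded with -1 sentinels, as the __main__ block below does:
#   f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
#   mf_knapsack(n, wt, val, w)  # note the switched n and w, matching below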
def knapsack(w, wt, val, n):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
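# Worked example (illustrative numbers, not from this module): with capacity 6,
# weights [1, 3, 4] and values [15, 20, 30], the loop above fills dp so that
#   best, table = knapsack(6, [1, 3, 4], [15, 20, 30], 3)
# returns best == 45 (take the items of weight 1 and weight 4).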
def knapsack_with_example_solution(w, wt, val):
    """simple docstring"""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """simple docstring"""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 254
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """simple docstring"""
    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: str = "<unk>", eos_token: str = "</s>", pad_token: str = "<pad>", ):
        '''simple docstring'''
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=F"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
    def add_unk_id(self):
        '''simple docstring'''
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
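# Minimal usage sketch (illustrative corpus; the vocab size is an arbitrary choice):
#   tok = SentencePieceUnigramTokenizer()
#   tok.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=100)
#   print(tok._tokenizer.encode("hello world").tokens)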
| 254
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
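# Usage sketch (illustrative): instantiating with defaults and inspecting the
# backbone stage names derived in __init__ above.
#   config = FocalNetConfig(depths=[2, 2, 6, 2])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']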
| 318
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features", [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ], )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features", [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ], )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
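# Equivalent high-level call (sketch; the data file path is a placeholder):
# TextDatasetReader is what backs the "text" loader of `load_dataset`.
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "my_corpus.txt"})["train"]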
| 338
| 0
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 21
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 21
| 1
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
| 70
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with a key, a parent pointer and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect the 1-indexed vertices ``a`` and ``b`` with an edge of weight ``edge``."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    """Prim's algorithm using a linear scan for the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap for the minimum-key vertex."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Kept as a placeholder for ``doctest.testmod()``; see the sketch below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_luke"""] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 214
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference(self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_dict_tuple_outputs_equivalent(self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    def test_save_load_local(self ):
        super().test_save_load_local(expected_max_difference=3E-3 )

    def test_save_load_optional_components(self ):
        super().test_save_load_optional_components(expected_max_difference=3E-3 )

    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def test_inference_cifar10(self ):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
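
    # Note (added): eta=0.0 makes DDIM sampling deterministic for a fixed
    # seed, which is what makes the exact slice comparison above reproducible.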
    def test_inference_ema_bedroom(self ):
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 214
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __A ( unittest.TestCase ):
    def test_flatten_dict(self ):
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
    def test_transpose_numpy(self ):
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )

    @require_torch
    def test_transpose_torch(self ):
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )

    @require_tf
    def test_transpose_tf(self ):
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )

    @require_flax
    def test_transpose_flax(self ):
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ) , np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , np.asarray(transpose(t , axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy(self ):
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.reshape(x , (4, 3) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.reshape(x , (12, 5) ) ) )

    @require_torch
    def test_reshape_torch(self ):
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )

    @require_tf
    def test_reshape_tf(self ):
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )

    @require_flax
    def test_reshape_flax(self ):
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.asarray(reshape(t , (4, 3) ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.asarray(reshape(t , (12, 5) ) ) ) )
    def test_squeeze_numpy(self ):
        x = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(x ) , np.squeeze(x ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.squeeze(x , axis=2 ) ) )

    @require_torch
    def test_squeeze_torch(self ):
        x = np.random.randn(1 , 3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )

    @require_tf
    def test_squeeze_tf(self ):
        x = np.random.randn(1 , 3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )

    @require_flax
    def test_squeeze_flax(self ):
        x = np.random.randn(1 , 3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ) , np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.asarray(squeeze(t , axis=2 ) ) ) )
    def test_expand_dims_numpy(self ):
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.expand_dims(x , axis=1 ) ) )

    @require_torch
    def test_expand_dims_torch(self ):
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )

    @require_tf
    def test_expand_dims_tf(self ):
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )

    @require_flax
    def test_expand_dims_flax(self ):
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.asarray(expand_dims(t , axis=1 ) ) ) )
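
    # Note (added): transpose/reshape/squeeze/expand_dims dispatch on the
    # input type, which is why the same helper can be checked against NumPy,
    # Torch, TensorFlow and JAX arrays in the tests above.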
| 138
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
    def _compute(
        self , predictions: List[List[str]] , references: List[List[List[str]]] , min_len: int = 1 , max_len: int = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
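
# Note (added): ``corpus_gleu`` sums the n-gram matches over the whole corpus
# before taking min(precision, recall), so the result is not the average of
# per-sentence GLEU scores.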
| 138
| 1
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col: turn each k_size*k_size window into one row of the matrix
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # each output pixel is the dot product of its flattened window with the
    # flattened kernel; reshape back to the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
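

# A hedged sanity check (added, not in the original): a k x k kernel shrinks
# the output by k - 1 in each dimension, e.g. an 8x8 input with k=3 yields a
# 6x6 result.
#
#     small = zeros((8, 8))
#     assert gaussian_filter(small, 3, sigma=1).shape == (6, 6)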
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ : Optional[Any] = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
SCREAMING_SNAKE_CASE__ : int = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
SCREAMING_SNAKE_CASE__ : Optional[Any] = gaussian_filter(gray, 3, sigma=1)
SCREAMING_SNAKE_CASE__ : str = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
| 354
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}"
    "\nTotal COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
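
# Note (added): the XPath above is tied to worldometers' current markup; if
# the page layout changes, the three counters may shift order or disappear.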
| 339
| 0
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : str ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(_snake_case , _snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Any , _snake_case : List[Any] ) -> Dict:
'''simple docstring'''
__magic_name__ : str = tmp_path / "cache"
__magic_name__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ : str = ParquetDatasetReader(_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case ).read()
_check_parquet_dataset(_snake_case , _snake_case )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : List[Any] , _snake_case : Any ) -> Dict:
'''simple docstring'''
__magic_name__ : Any = tmp_path / "cache"
__magic_name__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__magic_name__ : List[str] = features.copy() if features else default_expected_features
__magic_name__ : Tuple = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ : Dict = ParquetDatasetReader(_snake_case , features=_snake_case , cache_dir=_snake_case ).read()
_check_parquet_dataset(_snake_case , _snake_case )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : List[Any] , _snake_case : List[Any] ) -> List[Any]:
'''simple docstring'''
__magic_name__ : List[str] = tmp_path / "cache"
__magic_name__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__magic_name__ : Any = ParquetDatasetReader(_snake_case , cache_dir=_snake_case , split=_snake_case ).read()
_check_parquet_dataset(_snake_case , _snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(_snake_case , _snake_case ):
__magic_name__ : Optional[Any] = parquet_path
elif issubclass(_snake_case , _snake_case ):
__magic_name__ : List[str] = [parquet_path]
__magic_name__ : Dict = tmp_path / "cache"
__magic_name__ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__magic_name__ : Any = ParquetDatasetReader(_snake_case , cache_dir=_snake_case ).read()
_check_parquet_dataset(_snake_case , _snake_case )
def lowerCAmelCase_ ( _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Dict=("train",) ) -> str:
'''simple docstring'''
assert isinstance(_snake_case , _snake_case )
for split in splits:
__magic_name__ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : int , _snake_case : List[str] ) -> int:
'''simple docstring'''
__magic_name__ : Any = tmp_path / "cache"
__magic_name__ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ : Optional[Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_snake_case , keep_in_memory=_snake_case ).read()
_check_parquet_datasetdict(_snake_case , _snake_case )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : List[Any] = tmp_path / "cache"
__magic_name__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__magic_name__ : Dict = features.copy() if features else default_expected_features
__magic_name__ : Optional[Any] = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ : str = ParquetDatasetReader({"train": parquet_path} , features=_snake_case , cache_dir=_snake_case ).read()
_check_parquet_datasetdict(_snake_case , _snake_case )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ) -> Tuple:
'''simple docstring'''
if split:
__magic_name__ : Union[str, Any] = {split: parquet_path}
else:
__magic_name__ : Union[str, Any] = "train"
__magic_name__ : List[str] = {"train": parquet_path, "test": parquet_path}
__magic_name__ : Union[str, Any] = tmp_path / "cache"
__magic_name__ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__magic_name__ : str = ParquetDatasetReader(_snake_case , cache_dir=_snake_case ).read()
_check_parquet_datasetdict(_snake_case , _snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[Any] ) -> int:
'''simple docstring'''
__magic_name__ : Union[str, Any] = ParquetDatasetWriter(_snake_case , tmp_path / "foo.parquet" )
assert writer.write() > 0
__magic_name__ : Any = pq.ParquetFile(tmp_path / "foo.parquet" )
__magic_name__ : Optional[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : List[Any] ) -> str:
'''simple docstring'''
__magic_name__ : str = str(shared_datadir / "test_image_rgb.jpg" )
__magic_name__ : Dict = {"image": [image_path]}
__magic_name__ : Dict = Features({"image": Image()} )
__magic_name__ : Any = Dataset.from_dict(_snake_case , features=_snake_case )
__magic_name__ : Union[str, Any] = ParquetDatasetWriter(_snake_case , tmp_path / "foo.parquet" )
assert writer.write() > 0
__magic_name__ : Optional[Any] = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ : Optional[Any] = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
assert get_writer_batch_size(_snake_case ) == expected
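
# Note (added): ``get_writer_batch_size`` returns a smaller Parquet row-group
# size for image/audio features, so media datasets can be read row group by
# row group without large memory spikes.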
| 281
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : str = "▁"
snake_case : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self ):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1_004 )

    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer(self ):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self ):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
    def test_tokenization_base_easy_symbols(self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
__magic_name__ : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__magic_name__ : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__magic_name__ : List[Any] = " ".join(_a )
__magic_name__ : Any = self.big_tokenizer.encode_plus(_a , return_tensors="pt" , return_token_type_ids=_a )
__magic_name__ : Union[str, Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=_a )
__magic_name__ : List[str] = BigBirdConfig(attention_type="original_full" )
__magic_name__ : Optional[int] = BigBirdModel(_a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_a )
model(**_a )
@slow
    def test_special_tokens(self ):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# fmt: off
__magic_name__ : Optional[Any] = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 281
| 1
|
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Elementwise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
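

# A hedged example (added): sigmoid(0) = 0.5 and the GELU approximation is 0
# at the origin.
#
#     print(sigmoid(np.array([0.0])))                     # [0.5]
#     print(gaussian_error_linear_unit(np.array([0.0])))  # [0.]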
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
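
# Note (added): _LazyModule defers the heavy torch/TF imports declared above
# until an attribute is first accessed, so importing the package stays cheap.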
| 290
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self ):
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self ):
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )

    @cached_property
    def default_tokenizer_fast(self ):
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
    def test_prepare_batch(self ):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors="""pt""" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
@require_torch
    def test_prepare_batch_empty_target_text(self ):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , batch )
            self.assertIn("""attention_mask""" , batch )
            self.assertNotIn("""labels""" , batch )
            self.assertNotIn("""decoder_attention_mask""" , batch )
@require_torch
    def test_targets_max_length(self ):
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
    def test_outputs_not_longer_than_maxlen(self ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=True , truncation=True , return_tensors="""pt""" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
    def test_special_tokens(self ):
        src_text = ["""A long paragraph for summarization."""]
        tgt_text = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors="""pt""" )
            targets = tokenizer(text_target=tgt_text , return_tensors="""pt""" )
            input_ids = inputs["""input_ids"""]
            labels = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
    def test_global_attention_mask(self ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["""Summary of the text.""", """Another summary."""]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output["""global_attention_mask"""] = [[0] * len(x ) for x in encoded_output["""input_ids"""]]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , expected_global_attention_mask )
    def test_pretokenized_inputs(self ):
        pass
    def test_embeded_special_tokens(self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 70
|
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune: the current path already overshoots, or even taking every
    # remaining number cannot reach max_sum
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
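
# Expected output (added, derived by hand from the inputs above):
# [3, 4, 2] [4, 5]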
| 327
| 0
|
class Graph:
    """Directed graph stored as an adjacency dict {vertex: [neighbors]}."""

    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited list keeps track of already visited vertices
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
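# Complexity note: dfs_recursive scans every key of self.vertex on each call,
# so this traversal is O(V^2) in the worst case; iterating only the adjacency
# list self.vertex[start_vertex] would give the usual O(V + E) bound.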
| 361
|
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # strike out multiples of p, starting at p * p, with step p
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'{solution() = }')
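# Sanity check from the Project Euler 87 statement: below fifty, exactly four
# numbers (28, 33, 47, 49) are a prime square plus a prime cube plus a prime
# fourth power, so solution(50) == 4.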
| 130
| 0
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 149
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # a different beta_start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # a different beta_start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
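# To exercise this suite in a diffusers checkout (the exact test path is an
# assumption about the repo layout):
#   pytest tests/schedulers/test_scheduler_ddim_parallel.py -q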
| 149
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
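# Minimal usage sketch (assuming the FocalNet port of transformers is
# installed; FocalNetConfig is the class defined above):
#   config = FocalNetConfig(depths=[2, 2, 6, 2])
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']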
| 361
|
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
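# Example invocation (script name and paths are assumptions, shown only for
# illustration):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert_ckpt \
#       --mobilebert_config_file ./mobilebert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin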
| 128
| 0
|
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    # Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # Req = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
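# Quick sanity checks (values verifiable by hand):
#   resistor_parallel([2, 2]) == 1.0  # 1 / (1/2 + 1/2)
#   resistor_series([2, 2]) == 4.0    # 2 + 2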
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_speecht5 import (
        SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
        SpeechT5ForSpeechToSpeech,
        SpeechT5ForSpeechToText,
        SpeechT5ForTextToSpeech,
        SpeechT5HifiGan,
        SpeechT5Model,
        SpeechT5PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
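# Note on the pattern above: the package registers its submodules in
# _import_structure and hands them to _LazyModule, so importing the package
# stays cheap and heavy optional dependencies (torch, sentencepiece) are only
# loaded on first attribute access.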
| 297
| 0
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
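# Example invocation (script name and all paths are placeholders, shown only
# for illustration):
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./luke.bin --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-hf --model_size base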
| 359
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 25
| 0
|