# Reconstructed image processor module; the dump obfuscated the original class and
# variable names, so "ImageProcessor" below is a readability placeholder.
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    """Resize, center-crop, rescale and normalize images for a vision model."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (e.g. 1/255 to map uint8 images into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
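
A minimal usage sketch for the processor above. `ImageProcessor` is the reconstructed placeholder name from this sample; concrete transformers processors with these exact defaults (shortest edge 256, 224x224 crop, ImageNet-standard stats), e.g. `MobileNetV2ImageProcessor`, follow the same call pattern:

    import numpy as np
    from PIL import Image
    from transformers import MobileNetV2ImageProcessor  # one concrete processor with these defaults

    processor = MobileNetV2ImageProcessor()
    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))  # synthetic photo
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resize, then center crop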
# Reconstructed from the transformers research-projects frcnn image preprocessing
# module; identifier names were obfuscated in this dump.
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max]): range to sample the target shortest edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
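
A hedged usage sketch for `Preprocess`: the `cfg` object normally comes from the frcnn demo's configuration; the namespace below only stands in, with illustrative values.

    from types import SimpleNamespace

    import torch

    cfg = SimpleNamespace(
        INPUT=SimpleNamespace(MIN_SIZE_TEST=800, MAX_SIZE_TEST=1333, FORMAT="BGR"),
        SIZE_DIVISIBILITY=0,
        PAD_VALUE=0.0,
        MODEL=SimpleNamespace(DEVICE="cpu", PIXEL_STD=[1.0, 1.0, 1.0], PIXEL_MEAN=[102.98, 115.95, 122.77]),
    )
    preprocess = Preprocess(cfg)
    image = torch.rand(450, 600, 3)  # HWC float tensor standing in for a decoded photo
    images, sizes, scales_yx = preprocess(image, single_image=True)
    print(images.shape, sizes, scales_yx)  # padded CHW tensor, original size, y/x rescale factors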
# Reconstructed from transformers' Mask2Former configuration; identifier names
# were obfuscated in this dump.
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
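
Usage sketch: instantiating the config with defaults builds a Swin backbone config automatically, and `from_backbone_config` accepts an existing one.

    from transformers import SwinConfig

    config = Mask2FormerConfig()
    print(config.backbone_config.model_type)  # "swin"
    print(config.hidden_dim, config.num_queries)  # 256 100

    # or start from a customized backbone
    backbone = SwinConfig(depths=[2, 2, 6, 2])
    config = Mask2FormerConfig.from_backbone_config(backbone)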
# Reconstructed datasets/__init__.py (module-level names were obfuscated in this dump).
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Re-expose a few names on their historical (deprecated) module paths.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
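
With the package importable, the public entry point is `load_dataset`; a short, standard example:

    from datasets import load_dataset

    dataset = load_dataset("glue", "mrpc", split="train")
    print(dataset[0])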
# Reconstructed from diffusers' RePaint pipeline; identifier names were obfuscated
# in this dump.
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize the mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
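
Usage sketch following the upstream RePaint example; the celebahq checkpoint name is the one used in the diffusers docs, and `original_image` / `mask_image` are PIL images you supply.

    from diffusers import RePaintPipeline, RePaintScheduler

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

    output = pipe(
        image=original_image,   # the image to inpaint
        mask_image=mask_image,  # 0-valued pixels mark the region to repaint
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
    )
    inpainted = output.images[0]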
# Reconstructed from datasets' text-classification task template; identifier
# names were obfuscated in this dump.
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so bypass __setattr__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
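
Usage sketch: aligning the template with a dataset's features swaps the generic `ClassLabel` placeholder for the dataset's concrete label feature.

    from datasets import ClassLabel, Features, Value
    from datasets.tasks import TextClassification

    template = TextClassification(text_column="sentence", label_column="sentiment")
    features = Features({"sentence": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
    aligned = template.align_with_features(features)
    print(aligned.label_schema["labels"].names)  # ['neg', 'pos']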
# Reconstructed UMT5 test module; identifier names were obfuscated in this dump.
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
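
A short generation sketch for the model these tests cover (standard transformers API; `google/umt5-small` is the public checkpoint used in the integration test):

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration

    tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
    model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")

    inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=10)
    print(tokenizer.batch_decode(outputs))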
# Reconstructed from diffusers' DeepFloyd IF inpainting fast tests; identifier
# names were obfuscated in this dump.
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
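
Usage sketch for the pipeline under test; the checkpoint name comes from the DeepFloyd IF model card, and the weights are gated behind a license acceptance on the Hub.

    import torch
    from diffusers import IFInpaintingPipeline

    pipe = IFInpaintingPipeline.from_pretrained(
        "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
    )
    pipe.enable_model_cpu_offload()
    result = pipe(
        prompt="blue sunglasses",
        image=image,            # PIL image to edit
        mask_image=mask_image,  # mask selecting the region to inpaint
        num_inference_steps=50,
    )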
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=64 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=[1, 16, 4, 4] , _lowerCamelCase=None , ):
lowerCamelCase__ =parent
lowerCamelCase__ =batch_size
lowerCamelCase__ =image_size
lowerCamelCase__ =patch_size
lowerCamelCase__ =num_channels
lowerCamelCase__ =is_training
lowerCamelCase__ =use_labels
lowerCamelCase__ =hidden_size
lowerCamelCase__ =num_hidden_layers
lowerCamelCase__ =num_attention_heads
lowerCamelCase__ =intermediate_size
lowerCamelCase__ =hidden_act
lowerCamelCase__ =hidden_dropout_prob
lowerCamelCase__ =attention_probs_dropout_prob
lowerCamelCase__ =type_sequence_label_size
lowerCamelCase__ =initializer_range
lowerCamelCase__ =scope
lowerCamelCase__ =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCamelCase__ =(self.image_size // 32) ** 2
lowerCamelCase__ =num_patches + 1
def _a ( self ):
lowerCamelCase__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ =None
if self.use_labels:
lowerCamelCase__ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ =self.get_config()
return config, pixel_values, labels
def _a ( self ):
lowerCamelCase__ ={
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase__ =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =self.type_sequence_label_size
lowerCamelCase__ =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase__ =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ):
lowerCamelCase__ =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =config_and_inputs
lowerCamelCase__ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Tuple = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : Any = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Optional[Any] = False
A__ : int = False
def _a ( self ):
lowerCamelCase__ =ViTHybridModelTester(self )
lowerCamelCase__ =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _a ( self ):
pass
def _a ( self ):
lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ):
lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ =model_class(A_ )
lowerCamelCase__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ =[*signature.parameters.keys()]
lowerCamelCase__ =["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ):
lowerCamelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ):
lowerCamelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ):
lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ =_config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase__ =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCamelCase__ =[F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def lowerCamelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ):
lowerCamelCase__ =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
lowerCamelCase__ =self.default_image_processor
lowerCamelCase__ =prepare_img()
lowerCamelCase__ =image_processor(images=A_ , return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ =model(**A_ )
# verify the logits
lowerCamelCase__ =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase__ =torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ):
lowerCamelCase__ =ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
lowerCamelCase__ =ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
lowerCamelCase__ =prepare_img()
lowerCamelCase__ =image_processor(images=A_ , return_tensors="pt" )
lowerCamelCase__ =model(**A_ )
lowerCamelCase__ =outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCamelCase__ =logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
# Reconstructed from the transformers PyTorch Lightning NER example (run_ner.py);
# identifier names were obfuscated in this dump.
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        "Compute validation."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test."
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
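
A hypothetical invocation of the script (flag values are placeholders; the example's README documents real dataset paths and label files):

    python run_ner.py \
        --data_dir ./data \
        --labels ./data/labels.txt \
        --model_name_or_path bert-base-cased \
        --output_dir ./output \
        --max_seq_length 128 \
        --do_train \
        --do_predict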
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _lowerCamelCase ( UpperCAmelCase_ : List[str], UpperCAmelCase_ : Any, UpperCAmelCase_ : List[str] ) -> str:
"""simple docstring"""
A__ = AlbertConfig.from_json_file(UpperCAmelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
A__ = AlbertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), UpperCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--albert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained ALBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
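# Example invocation (illustrative paths only, not from the original script):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin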
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__( self , short_edge_length , max_size=sys.maxsize ):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs ):
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
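# Minimal usage sketch (assumed, not part of the original file): resize HWC
# uint8 numpy images so the short edge lands in [600, 800], capped at 1333:
#   aug = ResizeShortestEdge([600, 800], max_size=1333)
#   resized = aug([img])  # list with one resized image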
class Preprocess:
    def __init__( self , cfg ):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad( self , images ):
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self , images , single_image=False ):
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor , box_size: Tuple[int, int] ):
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0, max=w )
    tensor[:, 1].clamp_(min=0, max=h )
    tensor[:, 2].clamp_(min=0, max=w )
    tensor[:, 3].clamp_(min=0, max=h )
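# Hedged usage sketch (not part of the original file): detector boxes predicted
# on the resized input are usually mapped back and sanitized like this:
#   boxes = _scale_box(raw_boxes, scales_yx)     # rescale to original resolution
#   _clip_box(boxes, (orig_height, orig_width))  # clamp coordinates in place
# `raw_boxes`, `scales_yx`, `orig_height` and `orig_width` are hypothetical names.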
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViltImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__(
        self ,
        images ,
        text = None ,
        add_special_tokens = True ,
        padding = False ,
        truncation = None ,
        max_length = None ,
        stride = 0 ,
        pad_to_multiple_of = None ,
        return_token_type_ids = None ,
        return_attention_mask = None ,
        return_overflowing_tokens = False ,
        return_special_tokens_mask = False ,
        return_offsets_mapping = False ,
        return_length = False ,
        verbose = True ,
        return_tensors = None ,
        **kwargs ,
    ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
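    # Note (illustrative): dict.fromkeys deduplicates while preserving first-seen
    # order, e.g. list(dict.fromkeys(["input_ids", "pixel_values", "input_ids"]))
    # evaluates to ["input_ids", "pixel_values"].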
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1_000 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def UpperCamelCase ( self ):
pass
    def test_full_tokenizer( self ):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=False , sequences=sequences , )
def power( base: int , exponent: int ) -> float:
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    print('''Raise base to the power of exponent using recursion...''')
    base = int(input('''Enter the base: ''').strip())
    exponent = int(input('''Enter the exponent: ''').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
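# A minimal iterative alternative (a sketch, not part of the original script):
# exponentiation by squaring needs only O(log n) multiplications and handles
# negative exponents directly. `fast_power` is a hypothetical helper name.
def fast_power(base: float, exponent: int) -> float:
    result = 1.0
    negative = exponent < 0
    exponent = abs(exponent)
    while exponent:
        if exponent & 1:  # fold the current square in when the low bit is set
            result *= base
        base *= base
        exponent >>= 1
    return 1 / result if negative else result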
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )}, )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
    def __post_init__(self ):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )
    def __post_init__(self ):
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references( dataset , ref_file ):
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
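# Illustrative note (assumed format, not from this script): each line of the ref
# file is a JSON list of sub-token positions that continue a whole word, e.g.
# "[2, 3]" tells DataCollatorForWholeWordMask to mask tokens 2 and 3 together
# with the word they belong to.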
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
            datasets['train'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.config_overrides is not None:
        logger.info(F'Overriding config: {model_args.config_overrides}' )
        config.update_from_string(model_args.config_overrides )
        logger.info(F'New config: {config}' )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_train_file , 'w' ) as writer:
                logger.info('***** Train results *****' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F' {key} = {value}' )
                    writer.write(F'{key} = {value}\n' )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in sorted(results.items() ):
                    logger.info(F' {key} = {value}' )
                    writer.write(F'{key} = {value}\n' )
    return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
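# Illustrative launch (paths assumed, not from this script): on TPUs the script
# is typically started through xla_spawn, which calls `_mp_fn` once per core:
#   python xla_spawn.py --num_cores 8 run_mlm_wwm.py --model_name_or_path bert-base-chinese ...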
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        seq_length=7 ,
        is_training=True ,
        use_input_mask=True ,
        use_token_type_ids=True ,
        use_labels=True ,
        vocab_size=99 ,
        hidden_size=24 ,
        num_hidden_layers=2 ,
        num_attention_heads=6 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        scope=None ,
        range_bbox=1000 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
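    # Worked example for the legality fix above (illustrative): a sampled box
    # [5, 7, 3, 2] becomes [3, 2, 5, 7] after the two swaps, so coordinate 2 is
    # always >= coordinate 0 and coordinate 3 is always >= coordinate 1.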
    def get_config( self ):
        return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def setUp( self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
    def test_inference_no_head( self ):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
'''simple docstring'''
import functools
def min_distance_up_bottom( word1: str , word2: str ) -> int:
    len_word1 = len(word1 )
    len_word2 = len(word2 )
    @functools.cache
    def min_distance( index1: int , index2: int ) -> int:
        # if first word index overflows - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2] )  # current letters not identical
        return min(
            1 + min_distance(index1 + 1 , index2 ) , 1 + min_distance(index1 , index2 + 1 ) , diff + min_distance(index1 + 1 , index2 + 1 ) , )
    return min_distance(0 , 0 )
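# Worked example (illustrative): min_distance_up_bottom("kitten", "sitting") == 3
# (substitute k->s, substitute e->i, insert g).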
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=14 ,
        seq_length=7 ,
        is_training=True ,
        use_token_type_ids=True ,
        use_input_mask=True ,
        use_labels=True ,
        use_mc_token_ids=True ,
        vocab_size=99 ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        num_choices=4 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self ):
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def setUp( self ):
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl( self ):
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        patch_size=2 ,
        max_length=24 ,
        num_mel_bins=16 ,
        is_training=True ,
        use_labels=True ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        type_sequence_label_size=10 ,
        initializer_range=0.02 ,
        scope=None ,
        frequency_stride=2 ,
        time_stride=2 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
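        # Worked example with the defaults above: num_mel_bins=16, max_length=24,
        # patch_size=2 and strides of 2 give (16 - 2) // 2 + 1 = 8 frequency
        # patches and (24 - 2) // 2 + 1 = 12 time patches, i.e. 96 patches and a
        # sequence length of 96 + 2 = 98.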
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = ASTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['input_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> Tuple:
lowerCamelCase_ = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' ,filename='sample_audio.flac' ,repo_type='dataset' )
lowerCamelCase_ ,lowerCamelCase_ = torchaudio.load(__UpperCamelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.default_feature_extractor
lowerCamelCase_ = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.default_feature_extractor
lowerCamelCase_ ,lowerCamelCase_ = prepare_audio()
lowerCamelCase_ = audio.squeeze().numpy()
lowerCamelCase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
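
# A minimal standalone sketch (not part of the test suite above; the helper name is
# ours) of the patch-grid arithmetic ASTModelTester relies on: the spectrogram is cut
# into patch_size x patch_size patches with the given frequency/time strides, and the
# sequence length is the patch count plus the [CLS] and distillation tokens.
def ast_sequence_length(num_mel_bins=16, max_length=24, patch_size=2, frequency_stride=2, time_stride=2):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2  # + [CLS] + distillation token


assert ast_sequence_length() == 8 * 12 + 2  # 98 with the tester defaults above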
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Measure qubit 0 of a fresh circuit and return the measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
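
# Hedged variant of the function above: adding a Hadamard gate before measuring puts
# the qubit in an equal superposition, so over 1,000 shots the counts split roughly
# 500/500 between "0" and "1" instead of being all-"0". `h` is the standard qiskit gate.
def single_qubit_superposition(qubits: int = 1, classical_bits: int = 1):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # Hadamard: |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)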
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
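
# A minimal sketch of what a patch_environment-style context manager does (the real
# helper lives in accelerate.utils; this standalone version is ours, for illustration
# only): temporarily set environment variables, then restore the previous state.
import os
from contextlib import contextmanager


@contextmanager
def patch_env(**kwargs):
    saved = {key.upper(): os.environ.get(key.upper()) for key in kwargs}
    os.environ.update({key.upper(): str(value) for key, value in kwargs.items()})
    try:
        yield
    finally:
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value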
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
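
# Standalone sketch of the slow-vs-fast parity check the tests above automate: encode
# the same string with both tokenizer backends and verify the ids match. The model
# name is the real "camembert-base" checkpoint; the helper itself is illustrative.
def check_slow_fast_parity(text="I was born in 92000, and this is falsé."):
    slow = CamembertTokenizer.from_pretrained("camembert-base")
    fast = CamembertTokenizerFast.from_pretrained("camembert-base")
    assert slow.encode(text) == fast.encode(text)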
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Legacy __init__: maps deprecated `no_*` kwargs onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
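
# Tiny self-contained demo (class and field names hypothetical) of the
# deprecated-argument flip used above: a legacy `no_foo=True` kwarg becomes
# `foo=False` on the instance.
class LegacyArgs:
    deprecated_args = ["no_inference"]
    inference = True

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # "no_inference" -> "inference"
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))


assert LegacyArgs(no_inference=True).inference is False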
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Return the merged implicant if the strings differ in exactly one position, else False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # merge every pair that differs in exactly one position
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm into a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether implicant string1 covers minterm string2 (count = number of '_')."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 if implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        # minterms are integers; int() keeps the binary expansion clean
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
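
# Worked example (comments only, so nothing runs on import): for 3 variables and
# minterms {1, 5, 7}:
#   decimal_to_binary(3, [1, 5, 7])  -> ['001', '101', '111']
#   check(['001', '101', '111'])     -> '001'/'101' merge to '_01' and '101'/'111'
#                                       merge to '1_1'; those two cannot merge
#                                       further, so both are prime implicants.
#   The chart then shows '_01' covers minterms 001 and 101, while '1_1' covers
#   101 and 111, so both implicants come out as essential.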
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
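
# Minimal sketch of the lazy-import idea the block above delegates to transformers'
# _LazyModule: a module-level __getattr__ (PEP 562) resolves names on first access
# instead of importing every submodule up front. The mapping below is illustrative.
import importlib

_LAZY_ATTRS = {"PoolFormerConfig": ".configuration_poolformer"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")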
"""simple docstring"""
_snake_case = 6_5521
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : List[str] = 1
_a : Optional[int] = 0
for plain_chr in plain_text:
_a : Dict = (a + ord(UpperCamelCase__ )) % MOD_ADLER
_a : List[Any] = (b + a) % MOD_ADLER
return (b << 1_6) | a
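
# Quick cross-check against the reference implementation in the standard library;
# for single-byte characters, ord() above matches the byte values zlib sees.
import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")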
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # prepend in descending order so iteration yields ascending values
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
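
# The same deprecation-shim pattern, reduced to its essentials (the class name here
# is hypothetical): keep the old name importable, emit a FutureWarning once it is
# instantiated, and inherit all behavior from the replacement class.
class OldExtractorName(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldExtractorName is deprecated; use SegformerImageProcessor.", FutureWarning)
        super().__init__(*args, **kwargs)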
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
[
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
] , )
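
# Condensed usage sketch of what the tests above exercise (tiny test checkpoint, real
# pipeline API): the zero-shot detector scores an image against free-form labels.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.1,
)
# Each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}.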
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, 'index.json')
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"""{key}.dat""")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, 'weight', tmp_dir, {})
                weight_file = os.path.join(tmp_dir, 'weight.dat')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {'weight': {'shape': [2, 3], 'dtype': str(dtype).split('.')[1]}})

                new_weight = load_offloaded_weight(weight_file, index['weight'])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1': 0, 'a.2': 2})

        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1.a': 0, 'a.2.a': 2})
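
# Rough sketch of why a raw ".dat" file plus an index entry is enough to reload a
# tensor (assumption: this mirrors accelerate's on-disk layout only loosely): the
# index stores shape and dtype, and the bytes are memory-mapped back with numpy.
import numpy as np


def load_dat(path, shape, dtype="float32"):
    array = np.memmap(path, dtype=dtype, mode="r", shape=tuple(shape))
    return torch.from_numpy(np.array(array))  # copy out of the memmap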
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate the polynomial through (x_points, y_points) at x0 using
    Neville's iterated-interpolation scheme; returns [value, table]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
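
# Quick check on data sampled from y = x**2: the interpolant reproduces the
# parabola, so evaluating between the sample points gives the exact value.
# >>> neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0]
# 6.25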
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the Graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
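
# Equivalent iterative traversal (a sketch, reusing the Graph class above): an
# explicit stack replaces the recursion, which avoids hitting Python's recursion
# limit on deep graphs.
def dfs_iterative(graph: Graph, start: int) -> list:
    visited, stack = [], [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.append(vertex)
            # push neighbours in reverse so the left-most neighbour is visited first
            stack.extend(reversed(graph.vertex.get(vertex, [])))
    return visited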
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
def __init__( self ):
_lowercase : Optional[int] = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ):
if self.graph.get(_lowerCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_lowercase : Tuple = [[w, v]]
if not self.graph.get(_lowerCAmelCase ):
_lowercase : Optional[Any] = []
def __a ( self ):
return list(self.graph )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if self.graph.get(_lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
if s == d:
return []
_lowercase : str = []
_lowercase : Union[str, Any] = []
if s == -2:
_lowercase : str = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Optional[int] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowerCAmelCase ) != 0:
_lowercase : int = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Optional[int] = ss
# check if se have reached the starting point
if len(_lowerCAmelCase ) == 0:
return visited
def __a ( self , _lowerCAmelCase=-1 ):
if c == -1:
_lowercase : Dict = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_lowerCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowercase : str = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowerCAmelCase , _lowerCAmelCase , 1 )
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : str = deque()
_lowercase : Optional[Any] = []
if s == -2:
_lowercase : List[Any] = list(self.graph )[0]
d.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
while d:
_lowercase : Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __a ( self , _lowerCAmelCase ):
return len(self.graph[u] )
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : Dict = []
_lowercase : Tuple = []
if s == -2:
_lowercase : Union[str, Any] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : List[Any] = s
_lowercase : Dict = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : str = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_lowerCAmelCase ) != 0:
_lowercase : str = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : int = ss
# check if se have reached the starting point
if len(_lowerCAmelCase ) == 0:
return sorted_nodes
def __a ( self ):
_lowercase : Tuple = []
_lowercase : Tuple = []
_lowercase : Dict = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Optional[int] = -2
_lowercase : Tuple = []
_lowercase : Dict = s
_lowercase : List[str] = False
_lowercase : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : Union[str, Any] = len(_lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : int = True
if len(_lowerCAmelCase ) != 0:
_lowercase : List[Any] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Tuple = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : Dict = s
_lowercase : Union[str, Any] = ss
# check if se have reached the starting point
if len(_lowerCAmelCase ) == 0:
return list(_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = []
_lowercase : Optional[Any] = []
_lowercase : List[Any] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Dict = -2
_lowercase : Union[str, Any] = []
_lowercase : int = s
_lowercase : List[str] = False
_lowercase : Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : str = len(_lowerCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : Dict = True
if len(_lowerCAmelCase ) != 0:
_lowercase : Union[str, Any] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Any = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : Optional[Any] = s
_lowercase : Any = ss
# check if se have reached the starting point
if len(_lowerCAmelCase ) == 0:
return False
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
_lowercase : Optional[int] = time()
self.dfs(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = time()
return end - begin
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : str = time()
self.bfs(_lowerCAmelCase )
_lowercase : str = time()
return end - begin
class lowerCAmelCase_ :
def __init__( self ):
_lowercase : str = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ):
# check if the u exists
if self.graph.get(_lowerCAmelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_lowercase : Any = [[w, v]]
# add the other way
if self.graph.get(_lowerCAmelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_lowercase : List[Any] = [[w, u]]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if self.graph.get(_lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowerCAmelCase )
# the other way round
if self.graph.get(_lowerCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
if s == d:
return []
_lowercase : Dict = []
_lowercase : Dict = []
if s == -2:
_lowercase : Tuple = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : str = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowerCAmelCase ) != 0:
_lowercase : Any = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Tuple = ss
# check if se have reached the starting point
if len(_lowerCAmelCase ) == 0:
return visited
def __a ( self , _lowerCAmelCase=-1 ):
if c == -1:
_lowercase : Dict = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_lowerCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowercase : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowerCAmelCase , _lowerCAmelCase , 1 )
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : Optional[int] = deque()
_lowercase : Any = []
if s == -2:
_lowercase : Optional[int] = list(self.graph )[0]
d.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
while d:
_lowercase : Any = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self , _lowerCAmelCase ):
return len(self.graph[u] )
def __a ( self ):
_lowercase : Any = []
_lowercase : Optional[Any] = []
_lowercase : Optional[int] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Optional[int] = -2
_lowercase : Dict = []
_lowercase : int = s
_lowercase : Dict = False
_lowercase : Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : str = len(_lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : Any = True
if len(_lowerCAmelCase ) != 0:
_lowercase : List[Any] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Union[str, Any] = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : int = s
_lowercase : Any = ss
# check if se have reached the starting point
if len(_lowerCAmelCase ) == 0:
return list(_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = []
_lowercase : Dict = []
_lowercase : List[str] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Any = -2
_lowercase : Optional[Any] = []
_lowercase : List[str] = s
_lowercase : Optional[int] = False
_lowercase : Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : Tuple = len(_lowerCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : str = True
if len(_lowerCAmelCase ) != 0:
_lowercase : List[str] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Optional[Any] = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : Optional[int] = s
_lowercase : List[str] = ss
# check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return False
def __a ( self ):
return list(self.graph )
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
_lowercase : Any = time()
self.dfs(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = time()
return end - begin
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : Dict = time()
self.bfs(_lowerCAmelCase )
_lowercase : int = time()
return end - begin
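# --- Added usage sketch ---
# The class above is name-obfuscated, so this is a hedged, self-contained
# restatement of the traversal technique its dfs method implements: an
# iterative depth-first walk over an adjacency structure with an explicit
# stack and a visited list (mirroring the stack/visited bookkeeping above).
def _dfs_sketch(graph, start):
    stack, visited = [start], [start]
    while stack:
        node = stack.pop()
        for neighbour in graph.get(node, []):
            if neighbour not in visited:
                visited.append(neighbour)
                stack.append(neighbour)
    return visited
# Example: _dfs_sketch({0: [1, 2], 1: [2], 2: []}, 0) -> [0, 1, 2]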
| 66
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=[] ) -> Union[str, Any]:
__snake_case = size[0] - overlap_pixels * 2
__snake_case = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__snake_case = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
__snake_case = np.pad(snake_case_ , mode='''linear_ramp''' , pad_width=snake_case_ , end_values=0 )
if "l" in remove_borders:
__snake_case = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__snake_case = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__snake_case = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__snake_case = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
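# --- Added note ---
# A hedged reading of the mask construction above: a solid 255 core is padded
# with numpy's "linear_ramp" mode, which fades linearly to 0 over the overlap
# width on every side - i.e. a feathered alpha mask for blending adjacent
# tiles. Sides listed in remove_borders are then cropped back off, so tiles
# sitting on the image boundary keep a hard edge there.
#
#     import numpy as np
#     core = np.ones((4, 4), dtype=np.uint8) * 255
#     feathered = np.pad(core, mode="linear_ramp", pad_width=2, end_values=0)
#     # feathered.shape == (8, 8); values ramp from 0 at the border to 255 in the centre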
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] ) -> str:
return max(snake_case_ , min(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( snake_case_ : [int] , snake_case_ : [int] , snake_case_ : [int] ) -> Optional[Any]:
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowerCamelCase__ ( snake_case_ : [int] , snake_case_ : int , snake_case_ : [int] ) -> Tuple:
__snake_case = list(snake_case_ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__snake_case = clamp_rect(snake_case_ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> str:
__snake_case = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(snake_case_ , (original_slice, 0) )
return result
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : str ) -> Optional[Any]:
__snake_case = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__snake_case = tile.crop(snake_case_ )
return tile
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : int ) -> Optional[int]:
__snake_case = n % d
return n - divisor
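# --- Added illustrative sketch ---
# The rectangle helpers above are name-obfuscated; this hedged, self-contained
# sketch shows the arithmetic they implement: grow a tile rect by an overlap
# margin on all sides, then clamp it back inside the image bounds.
def _add_overlap_rect_sketch(rect, overlap, image_size):
    clamp = lambda v, lo, hi: max(lo, min(v, hi))
    x0, y0, x1, y1 = rect
    return (
        clamp(x0 - overlap, 0, image_size[0]),
        clamp(y0 - overlap, 0, image_size[1]),
        clamp(x1 + overlap, 0, image_size[0]),
        clamp(y1 + overlap, 0, image_size[1]),
    )

# Example: a 128x128 tile at the top-left corner of a 512x512 image with a
# 32px overlap can only grow right/downwards:
#     _add_overlap_rect_sketch((0, 0, 128, 128), 32, (512, 512)) -> (0, 0, 160, 160)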
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Dict , a__ : AutoencoderKL , a__ : CLIPTextModel , a__ : CLIPTokenizer , a__ : UNetaDConditionModel , a__ : DDPMScheduler , a__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a__ : int = 350 , ):
"""simple docstring"""
super().__init__(
vae=a__ , text_encoder=a__ , tokenizer=a__ , unet=a__ , low_res_scheduler=a__ , scheduler=a__ , max_noise_level=a__ , )
def a (self : Tuple , a__ : str , a__ : int , a__ : Tuple , a__ : List[str] , a__ : Tuple , a__ : str , a__ : Dict , **a__ : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__snake_case = add_overlap_rect(a__ , a__ , image.size )
__snake_case = image.crop(a__ )
__snake_case = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__snake_case = translated_slice_x - (original_image_slice / 2)
__snake_case = max(0 , a__ )
__snake_case = squeeze_tile(a__ , a__ , a__ , a__ )
__snake_case = to_input.size
__snake_case = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__snake_case = super(a__ , self ).__call__(image=a__ , **a__ ).images[0]
__snake_case = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__snake_case = unsqueeze_tile(a__ , a__ )
__snake_case = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__snake_case = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
__snake_case = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=a__ ) , mode='''L''' , )
final_image.paste(
a__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , a__ )
@torch.no_grad()
def __call__(self : Any , a__ : Union[str, List[str]] , a__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , a__ : int = 75 , a__ : float = 9.0 , a__ : int = 50 , a__ : Optional[Union[str, List[str]]] = None , a__ : Optional[int] = 1 , a__ : float = 0.0 , a__ : Optional[torch.Generator] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a__ : int = 1 , a__ : int = 128 , a__ : int = 32 , a__ : int = 32 , ):
"""simple docstring"""
__snake_case = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
__snake_case = math.ceil(image.size[0] / tile_size )
__snake_case = math.ceil(image.size[1] / tile_size )
__snake_case = tcx * tcy
__snake_case = 0
for y in range(a__ ):
for x in range(a__ ):
self._process_tile(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , prompt=a__ , num_inference_steps=a__ , guidance_scale=a__ , noise_level=a__ , negative_prompt=a__ , num_images_per_prompt=a__ , eta=a__ , generator=a__ , latents=a__ , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def lowerCamelCase__ ( ) -> Tuple:
# Run a demo
__snake_case = '''stabilityai/stable-diffusion-x4-upscaler'''
__snake_case = StableDiffusionTiledUpscalePipeline.from_pretrained(snake_case_ , revision='''fp16''' , torch_dtype=torch.floataa )
__snake_case = pipe.to('''cuda''' )
__snake_case = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(obj : Any ):
print(f"""progress: {obj['progress']:.4f}""" )
obj["image"].save('''diffusers_library_progress.jpg''' )
__snake_case = pipe(image=snake_case_ , prompt='''Black font, white background, vector''' , noise_level=40 , callback=snake_case_ )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
| 592
| 0
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowercase ( a__ : Tuple ) -> List[str]:
"""simple docstring"""
if not is_accelerate_available():
return method
_UpperCamelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(a__ ) < version.parse("0.17.0" ):
return method
def wrapper(self : Optional[Any] , *a__ : List[str] , **a__ : Tuple ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *a__ , **a__ )
return wrapper
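# --- Added usage sketch ---
# A hedged example of how a version-gated decorator like the one above is
# applied (the decorator's real name is obfuscated here; `apply_forward_hook`
# is an assumed stand-in). On accelerate < 0.17.0, or without accelerate, the
# method is returned unchanged; otherwise the wrapper first runs
# self._hf_hook.pre_forward(self), giving offload hooks a chance to move
# weights onto the right device before the method executes.
#
#     class Encoder(torch.nn.Module):
#         @apply_forward_hook
#         def encode(self, x):
#             return self.net(x)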
| 589
|
def _lowercase ( a__ : int , a__ : int ) -> Dict:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(a__ , int(b / 2 ) ) * actual_power(a__ , int(b / 2 ) )
else:
return a * actual_power(a__ , int(b / 2 ) ) * actual_power(a__ , int(b / 2 ) )
def _lowercase ( a__ : int , a__ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(a__ , a__ )
return actual_power(a__ , a__ )
if __name__ == "__main__":
print(power(-2, -3))
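# --- Added worked examples ---
# power(2, 3)   == actual_power(2, 3)       == 2 * 2 * 2 == 8
# power(-2, -3) == 1 / actual_power(-2, -3) == 1 / -8    == -0.125
# Passing the *negative* exponent straight into actual_power still terminates:
# int(b / 2) truncates toward zero (-3 -> -1 -> 0), and Python's -3 % 2 == 1
# routes negative odd exponents into the odd branch, so the recursion bottoms
# out at b == 0 exactly as it does for positive exponents.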
| 589
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase : int = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__UpperCamelCase : str = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__UpperCamelCase : Any = 0
__UpperCamelCase : str = 1
__UpperCamelCase : List[Any] = 2
__UpperCamelCase : Dict = 3
__UpperCamelCase : Dict = 4
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = '''left'''
def __init__( self , _snake_case , _snake_case=False , _snake_case=True , _snake_case=False , _snake_case="<s>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<sep>" , _snake_case="<pad>" , _snake_case="<cls>" , _snake_case="<mask>" , _snake_case=["<eop>", "<eod>"] , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = 3
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if self.remove_space:
lowerCAmelCase = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('NFKD' , _snake_case )
lowerCAmelCase = ''.join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.preprocess_text(_snake_case )
lowerCAmelCase = self.sp_model.encode(_snake_case , out_type=_snake_case )
lowerCAmelCase = []
for piece in pieces:
if len(_snake_case ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_snake_case )
else:
new_pieces.append(_snake_case )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = None , _snake_case = True , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = kwargs.pop('use_source_tokenizer' , _snake_case )
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case , skip_special_tokens=_snake_case )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCAmelCase = []
lowerCAmelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_snake_case ) )
lowerCAmelCase = []
sub_texts.append(_snake_case )
else:
current_sub_text.append(_snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCAmelCase = ''.join(_snake_case )
lowerCAmelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCAmelCase = self.clean_up_tokenization(_snake_case )
return clean_text
else:
return text
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1, 1]
return ([0] * len(_snake_case )) + [1, 1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
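# --- Added layout note ---
# A sketch of the special-token layout the methods above produce (XLNet puts
# its classification token at the *end* of the sequence):
#     single sequence:   X </s> <cls>
#     pair of sequences: A </s> B </s> <cls>
# with token_type_ids of len(A + </s>) * [0] + len(B + </s>) * [1] + [2],
# where 2 is the dedicated CLS segment id used above.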
| 4
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_a : Dict = logging.get_logger(__name__)
_a : str = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
_a : Tuple = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_a : List[str] = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_a : Tuple = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
_a : Any = OrderedDict(
[
# Model for Image classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
_a : Any = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
_a : List[str] = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
_a : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
_a : str = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
_a : str = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
_a : str = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
_a : Optional[Any] = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
_a : Tuple = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
_a : str = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
_a : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_a : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_a : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_a : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_a : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_a : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_a : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_a : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_a : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_a : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_a : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_a : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_a : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_a : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __A (_BaseAutoModelClass ):
snake_case :Any = FLAX_MODEL_MAPPING
_a : List[Any] = auto_class_update(FlaxAutoModel)
class __A (_BaseAutoModelClass ):
snake_case :List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_a : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __A (_BaseAutoModelClass ):
snake_case :Optional[int] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_a : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __A (_BaseAutoModelClass ):
snake_case :List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_a : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __A (_BaseAutoModelClass ):
snake_case :str = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a : Any = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __A (_BaseAutoModelClass ):
snake_case :Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_a : Union[str, Any] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __A (_BaseAutoModelClass ):
snake_case :List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_a : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __A (_BaseAutoModelClass ):
snake_case :Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_a : int = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __A (_BaseAutoModelClass ):
snake_case :Any = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_a : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __A (_BaseAutoModelClass ):
snake_case :Tuple = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_a : str = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __A (_BaseAutoModelClass ):
snake_case :Union[str, Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_a : List[Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __A (_BaseAutoModelClass ):
snake_case :List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_a : str = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __A (_BaseAutoModelClass ):
snake_case :Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_a : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
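# --- Added usage sketch ---
# Minimal, hedged example of the lazy auto-class machinery defined above: the
# auto class resolves the checkpoint's config type through its _LazyAutoMapping
# and instantiates the matching Flax architecture.
#
#     from transformers import FlaxAutoModelForSequenceClassification
#     model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
#     # resolves through FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING to
#     # FlaxBertForSequenceClassification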
| 168
| 0
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def A ( lowercase__ : Dict ) -> Optional[int]:
if "model" in orig_key:
UpperCamelCase__ :Any = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
UpperCamelCase__ :int = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
UpperCamelCase__ :Optional[Any] = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
UpperCamelCase__ :List[Any] = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
UpperCamelCase__ :List[str] = orig_key.split(""".""" )[0].split("""_""" )[-1]
UpperCamelCase__ :Tuple = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCamelCase__ :Tuple = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
UpperCamelCase__ :Union[str, Any] = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
UpperCamelCase__ :Dict = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
UpperCamelCase__ :List[Any] = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
UpperCamelCase__ :List[Any] = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
UpperCamelCase__ :Tuple = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
UpperCamelCase__ :Any = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
UpperCamelCase__ :Tuple = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
UpperCamelCase__ :List[str] = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
UpperCamelCase__ :Union[str, Any] = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
UpperCamelCase__ :str = """yoso.""" + orig_key
return orig_key
def A ( lowercase__ : str , lowercase__ : List[str] ) -> Tuple:
for key in orig_state_dict.copy().keys():
UpperCamelCase__ :List[Any] = orig_state_dict.pop(lowercase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCamelCase__ :Optional[int] = val
UpperCamelCase__ :List[Any] = orig_state_dict["""cls.predictions.decoder.bias"""]
UpperCamelCase__ :Optional[int] = torch.arange(lowercase__ ).expand((1, -1) ) + 2
return orig_state_dict
def A ( lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Dict ) -> Union[str, Any]:
UpperCamelCase__ :List[str] = torch.load(lowercase__ , map_location="""cpu""" )["""model_state_dict"""]
UpperCamelCase__ :Union[str, Any] = YosoConfig.from_json_file(lowercase__ )
UpperCamelCase__ :Dict = YosoForMaskedLM(lowercase__ )
UpperCamelCase__ :Any = convert_checkpoint_helper(config.max_position_embeddings , lowercase__ )
print(model.load_state_dict(lowercase__ ) )
model.eval()
model.save_pretrained(lowercase__ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
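# --- Added usage example ---
# A hedged example invocation (the script filename and paths are placeholders):
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path /path/to/yoso_checkpoint.bin \
#         --config_file /path/to/yoso_config.json \
#         --pytorch_dump_path /path/to/output_dir
# The script renames the original checkpoint keys (norm1 -> attention.output.LayerNorm,
# W_q -> self.query, ...), loads them into YosoForMaskedLM, and writes the
# converted model with save_pretrained.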
| 383
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase = logging.getLogger()
def A ( lowercase__ : str ) -> str:
UpperCamelCase__ :int = {}
UpperCamelCase__ :List[str] = os.path.join(lowercase__ , """all_results.json""" )
if os.path.exists(lowercase__ ):
with open(lowercase__ , """r""" ) as f:
UpperCamelCase__ :List[Any] = json.load(lowercase__ )
else:
raise ValueError(f"""can't find {path}""" )
return results
UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __a ( self :Dict ):
import xla_spawn
UpperCamelCase__ :Optional[int] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ :int = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
UpperCamelCase__ :Any = time()
xla_spawn.main()
UpperCamelCase__ :Optional[Any] = time()
UpperCamelCase__ :Optional[Any] = get_results(lowerCamelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def __a ( self :Union[str, Any] ):
import xla_spawn
UpperCamelCase__ :List[str] = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
xla_spawn.main()
| 383
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, *A, **A ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.', A, )
super().__init__(*A, **A )
| 28
|
'''simple docstring'''
import unittest
import numpy as np
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray | None = None , ):
'''simple docstring'''
UpperCAmelCase__ = np.shape(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = np.shape(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = np.shape(SCREAMING_SNAKE_CASE__ )
if shape_a[0] != shape_b[0]:
UpperCAmelCase__ = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
if shape_b[1] != shape_c[1]:
UpperCAmelCase__ = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase__ = np.linalg.inv(SCREAMING_SNAKE_CASE__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase__ = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase__ = np.array([[2, 1], [6, 3]] )
UpperCAmelCase__ = schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = np.block([[a, b], [b.T, c]] )
UpperCAmelCase__ = np.linalg.det(_UpperCAmelCase )
UpperCAmelCase__ = np.linalg.det(_UpperCAmelCase )
UpperCAmelCase__ = np.linalg.det(_UpperCAmelCase )
self.assertAlmostEqual(_UpperCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase__ = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase__ = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_UpperCAmelCase ):
schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase__ = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase__ = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_UpperCAmelCase ):
schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
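# --- Added identity note ---
# The first test above checks the determinant identity behind the Schur
# complement (a sketch, using the test's own names):
#     M = [[A, B], [B^T, C]],  S = C - B^T @ A^{-1} @ B
#     det(M) == det(A) * det(S)
# i.e. np.linalg.det(np.block([[a, b], [b.T, c]])) should equal
# np.linalg.det(a) * np.linalg.det(schur_complement(a, b, c)) up to
# floating-point error, which is what assertAlmostEqual verifies.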
| 603
| 0
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowerCamelCase : List[str] = logging.getLogger(__name__)
def _lowerCAmelCase ( __magic_name__ :Tuple , __magic_name__ :List[Any] ):
    # save the model, removing any stale config/weights files first
if os.path.exists(__magic_name__ ):
if os.path.exists(os.path.join(__magic_name__ , '''config.json''' ) ) and os.path.isfile(
os.path.join(__magic_name__ , '''config.json''' ) ):
os.remove(os.path.join(__magic_name__ , '''config.json''' ) )
if os.path.exists(os.path.join(__magic_name__ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(__magic_name__ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(__magic_name__ , '''pytorch_model.bin''' ) )
else:
os.makedirs(__magic_name__ )
model.save_pretrained(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ :int , __magic_name__ :int=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(__magic_name__ , __magic_name__ )
UpperCAmelCase_ = p * torch.log(__magic_name__ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
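# --- Added note ---
# The function above computes the Shannon entropy H(p) = -sum_i p_i * log p_i
# along the last dimension; with unlogit=True the inputs are squared first
# (the exponent-2 torch.pow call above) before the p * log(p) accumulation.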
def _lowerCAmelCase ( __magic_name__ :Any ):
logger.info('''lv, h >\t''' + '''\t'''.join(F'''{x + 1}''' for x in range(len(__magic_name__ ) ) ) )
for row in range(len(__magic_name__ ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def _lowerCAmelCase ( __magic_name__ :Any , __magic_name__ :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any]=True , __magic_name__ :Tuple=True , __magic_name__ :Any=None , __magic_name__ :Optional[int]=False ):
UpperCAmelCase_, UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(__magic_name__ , __magic_name__ ).to(args.device )
UpperCAmelCase_ = torch.zeros(__magic_name__ , __magic_name__ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(__magic_name__ , __magic_name__ ).to(args.device )
head_mask.requires_grad_(requires_grad=__magic_name__ )
    # If attention heads have actually been pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(__magic_name__ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_), ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(__magic_name__ , labels=__magic_name__ , head_mask=__magic_name__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__magic_name__ ):
UpperCAmelCase_ = entropy(attn.detach() , __magic_name__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__magic_name__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(__magic_name__ , __magic_name__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(__magic_name__ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(__magic_name__ )
logger.info('''Head ranked by importance scores''' )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(__magic_name__ )
print_ad_tensor(__magic_name__ )
return attn_entropy, head_importance, total_loss
def _lowerCAmelCase ( __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[int] ):
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = compute_heads_importance(__magic_name__ , __magic_name__ , __magic_name__ , compute_entropy=__magic_name__ )
UpperCAmelCase_ = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , __magic_name__ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(__magic_name__ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float('''Inf''' )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(__magic_name__ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(__magic_name__ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(__magic_name__ )
# Compute metric and head importance again
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = compute_heads_importance(
__magic_name__ , __magic_name__ , __magic_name__ , compute_entropy=__magic_name__ , head_mask=__magic_name__ )
UpperCAmelCase_ = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __magic_name__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('''Final head mask''' )
print_ad_tensor(__magic_name__ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _lowerCAmelCase ( __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Dict , __magic_name__ :str ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = compute_heads_importance(
__magic_name__ , __magic_name__ , __magic_name__ , compute_entropy=__magic_name__ , compute_importance=__magic_name__ , head_mask=__magic_name__ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__magic_name__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(__magic_name__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__magic_name__ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = compute_heads_importance(
__magic_name__ , __magic_name__ , __magic_name__ , compute_entropy=__magic_name__ , compute_importance=__magic_name__ , head_mask=__magic_name__ , actually_pruned=__magic_name__ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __magic_name__ , __magic_name__ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __magic_name__ , __magic_name__ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 1_0_0 )
save_model(__magic_name__ , args.output_dir )
def _lowerCAmelCase ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__magic_name__ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__magic_name__ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__magic_name__ , type=__magic_name__ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__magic_name__ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__magic_name__ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__magic_name__ , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=__magic_name__ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=__magic_name__ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__magic_name__ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=__magic_name__ , default=4_2 )
parser.add_argument('''--local_rank''' , type=__magic_name__ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=__magic_name__ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__magic_name__ , default='''''' , help='''Can be used for distant debugging.''' )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__magic_name__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device('''cuda''' , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
__magic_name__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__magic_name__ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(__magic_name__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__magic_name__ )
torch.save(__magic_name__ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(__magic_name__ ),)
UpperCAmelCase_ = TensorDataset(*__magic_name__ )
UpperCAmelCase_ = RandomSampler(__magic_name__ )
UpperCAmelCase_ = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__magic_name__ , __magic_name__ , __magic_name__ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(__magic_name__ , __magic_name__ , __magic_name__ )
prune_heads(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 407
|
from pathlib import Path
import fire
def _lowerCAmelCase ( __magic_name__ :str , __magic_name__ :str , __magic_name__ :int ):
UpperCAmelCase_ = Path(__magic_name__ )
UpperCAmelCase_ = Path(__magic_name__ )
dest_dir.mkdir(exist_ok=__magic_name__ )
for path in src_dir.iterdir():
UpperCAmelCase_ = [x.rstrip() for x in list(path.open().readlines() )][:n]
UpperCAmelCase_ = dest_dir.joinpath(path.name )
print(__magic_name__ )
dest_path.open('''w''' ).write('''\n'''.join(__magic_name__ ) )
if __name__ == "__main__":
fire.Fire(minify)
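# --- Added usage example ---
# A hedged example invocation via python-fire (the module filename is assumed):
#     python minify.py /data/wmt_en_ro /data/wmt_en_ro_mini 100
# This copies the first 100 lines of every file in src_dir into dest_dir,
# creating dest_dir if it does not exist.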
| 407
| 1
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def A (__lowerCamelCase :dict ):
return (data["data"], data["target"])
def A (__lowerCamelCase :np.ndarray , __lowerCamelCase :np.ndarray ):
_lowerCAmelCase = XGBClassifier()
classifier.fit(__lowerCamelCase , __lowerCamelCase )
return classifier
def A ():
_lowerCAmelCase = load_iris()
_lowerCAmelCase , _lowerCAmelCase = data_handling(__lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = train_test_split(
__lowerCamelCase , __lowerCamelCase , test_size=0.25 )
_lowerCAmelCase = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
_lowerCAmelCase = xgboost(__lowerCamelCase , __lowerCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , display_labels=__lowerCamelCase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
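# --- Added note ---
# In the confusion-matrix display above, normalize="true" row-normalizes over
# the true labels, so each row sums to 1 and the diagonal entries read
# directly as per-class recall on the held-out test split.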
| 5
|
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase__ ( _lowerCAmelCase ,unittest.TestCase ):
A = WavaVecaPhonemeCTCTokenizer
A = False
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
super().setUp()
lowerCamelCase_ : Dict = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
lowerCamelCase_ : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCamelCase_ : List[Any] = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
lowerCamelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : str=20 , UpperCamelCase_ : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )) for i in range(len(UpperCamelCase_ ) )]
lowerCamelCase_ : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
lowerCamelCase_ : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
lowerCamelCase_ : str = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase_ : List[str] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase_ : List[str] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
lowerCamelCase_ : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
lowerCamelCase_ : Optional[int] = ''' ''' + output_txt
lowerCamelCase_ : Dict = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCamelCase ( self : Tuple , **UpperCamelCase_ : str ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
lowerCamelCase_ : Union[str, Any] = tokenizer('''m xxx ɪ''' , do_phonemize=UpperCamelCase_ ).input_ids
self.assertEqual(UpperCamelCase_ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
lowerCamelCase_ : Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=UpperCamelCase_ ).input_ids
        self.assertEqual(UpperCamelCase_ , [13, 393, 17, 395] )  # aaa should come right after xxx, and ccc two ids after aaa
lowerCamelCase_ : Union[str, Any] = tokenizer('''maɪ c''' , do_phonemize=UpperCamelCase_ ).input_ids
self.assertEqual(UpperCamelCase_ , [3, 200] ) # mai should be <unk> (=3)
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Union[str, Any] = '''Hello how are you'''
lowerCamelCase_ : Optional[int] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Dict = '''Hello how are you'''
lowerCamelCase_ : int = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase_ ).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_ ).input_ids )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : str = '''Hello how are you'''
lowerCamelCase_ : Tuple = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
lowerCamelCase_ : Any = tokenizer.decode(tokenizer(UpperCamelCase_ ).input_ids )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCamelCase_ : Dict = tokenizer.decode(sample_ids[0] )
lowerCamelCase_ : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch_tokens[0] )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
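        # note (added): the pad id between the two 15s keeps both "ɾ" in the
        # first decoded string; without it CTC grouping would merge them.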
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : Dict = '''Hello how are you'''
lowerCamelCase_ : Union[str, Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : Optional[int] = '''Hello how are you'''
lowerCamelCase_ : List[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase_ ).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_ ).input_ids )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : str = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
lowerCamelCase_ : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCamelCase_ : Dict = tokenizer.decode(sample_ids[0] )
lowerCamelCase_ : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch_tokens[0] )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
lowerCamelCase_ : Any = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase_ )
lowerCamelCase_ : Tuple = tokenizer.batch_decode(UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch_tokens[0] )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : Optional[Any] = '''Hello how are you'''
lowerCamelCase_ : Optional[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
lowerCamelCase_ : Any = tokenizer.decode(tokenizer(UpperCamelCase_ ).input_ids , filter_word_delimiter_token=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : int = '''Hello how are you'''
lowerCamelCase_ : Union[str, Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
lowerCamelCase_ : Optional[Any] = tokenizer.decode(tokenizer(UpperCamelCase_ ).input_ids , filter_word_delimiter_token=UpperCamelCase_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=UpperCamelCase_ )
lowerCamelCase_ : Any = '''Hello how are you'''
lowerCamelCase_ : Any = tokenizer(UpperCamelCase_ , phonemizer_lang='''en-us''' ).input_ids
lowerCamelCase_ : Dict = tokenizer(UpperCamelCase_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : int = tokenizer.decode(UpperCamelCase_ )
lowerCamelCase_ : Any = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(UpperCamelCase_ , '''ɛ l o h aʊ a ʁ j u''' )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Optional[int] = '''Hello how Are you'''
lowerCamelCase_ : Dict = '''hello how are you'''
lowerCamelCase_ : List[str] = tokenizer(UpperCamelCase_ ).input_ids
lowerCamelCase_ : List[Any] = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
lowerCamelCase_ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCamelCase_ : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def __UpperCamelCase ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = [d[key] for d in offsets]
return retrieved_list
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
lowerCamelCase_ : List[Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCamelCase_ : Union[str, Any] = tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ : int = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(UpperCamelCase_ : str , UpperCamelCase_ : int ):
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase_ ) )
# transform list to ModelOutput
lowerCamelCase_ : Optional[Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
[recursive_check(UpperCamelCase_ , UpperCamelCase_ ) for la, la in zip(UpperCamelCase_ , UpperCamelCase_ )]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
lowerCamelCase_ : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCamelCase_ : Tuple = tokenizer.batch_decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = [tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ ) for ids in sample_ids]
check_list_tuples_equal(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
    @unittest.skip('''encodes text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : int = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : List[str] = tokenizer.vocab_size
lowerCamelCase_ : Optional[int] = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCamelCase_ : Tuple = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
lowerCamelCase_ : Dict = tokenizer.add_tokens(UpperCamelCase_ )
lowerCamelCase_ : Dict = tokenizer.vocab_size
lowerCamelCase_ : Dict = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size + len(UpperCamelCase_ ) )
lowerCamelCase_ : Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCamelCase_ : List[Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
lowerCamelCase_ : List[Any] = tokenizer.add_special_tokens(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = tokenizer.vocab_size
lowerCamelCase_ : Dict = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size_a + len(UpperCamelCase_ ) )
lowerCamelCase_ : Union[str, Any] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ : Dict = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : List[Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
lowerCamelCase_ : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(output['''text'''] , UpperCamelCase_ )
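
# The decode tests above all hinge on CTC-style post-processing: collapse
# consecutive repeats, drop the pad (blank) id, and optionally drop the word
# delimiter. A minimal self-contained sketch of that rule (illustrative only,
# not the Hugging Face implementation; the function name is made up):
def _ctc_collapse_sketch(ids, pad_id, delimiter_id=None):
    out, prev = [], None
    for i in ids:
        if i != prev and i not in (pad_id, delimiter_id):
            out.append(i)
        prev = i
    return out


# e.g. [11, 5, 5, 0, 5, 8] with pad_id=0 keeps both 5s: [11, 5, 5, 8]
assert _ctc_collapse_sketch([11, 5, 5, 0, 5, 8], pad_id=0) == [11, 5, 5, 8]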
'''simple docstring'''
from collections import deque
def tarjan( snake_case__ ):
    '''simple docstring'''
    n = len(snake_case__ )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]

    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True

        for w in snake_case__[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index

    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )

    return components
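
# Tarjan visits every vertex once and examines every edge once inside
# strong_connect, so the pass above runs in O(V + E) time with O(V) extra
# space for the explicit stack and bookkeeping lists.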
def create_graph( n_vertices , edges ):
    '''simple docstring'''
    g = [[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
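    # Added hand-check (a sketch using the helpers above): two disjoint
    # 2-cycles must collapse into exactly two strongly connected components.
    extra = create_graph(4, [(0, 1), (1, 0), (2, 3), (3, 2)])
    assert sorted(sorted(c) for c in tarjan(extra)) == [[0, 1], [2, 3]]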
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase : str = logging.get_logger(__name__)
# TODO: upload to AWS
lowercase : Optional[Any] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class A ( PretrainedConfig ):
__magic_name__ = '''retribert'''
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : List[Any] = vocab_size
A : Dict = hidden_size
A : Any = num_hidden_layers
A : Any = num_attention_heads
A : List[Any] = hidden_act
A : Any = intermediate_size
A : str = hidden_dropout_prob
A : int = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : Tuple = type_vocab_size
A : Optional[Any] = initializer_range
A : Union[str, Any] = layer_norm_eps
A : Dict = share_encoders
A : Dict = projection_dim
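
# Usage sketch (added; values are illustrative): the class above corresponds to
# RetriBertConfig, so the usual PretrainedConfig workflow applies, e.g.
#   cfg = A(projection_dim=128)
#   cfg.save_pretrained("./retribert-config")  # writes config.json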
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state, string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
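
    # Added cross-check (sketch): the canonical example for this automaton,
    # listing every start offset at which each keyword occurs.
    auto = Automaton(["what", "hat", "ver", "er"])
    assert auto.search_in("whatever, err ... , wherever") == {
        "what": [0],
        "hat": [1],
        "ver": [5, 25],
        "er": [6, 10, 22, 26],
    }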
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None) -> None:
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self) -> str:
        return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''


class SegmentTree:
    def __init__(self, collection: Sequence, function) -> None:
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val) -> None:
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val) -> None:
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid), self._query_range(node.right, node.mid + 1, j), )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("""*""" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
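        # Added sanity check: after update(1, 5) the collection behaves as
        # [2, 5, 5, 3, 4], so for operator.add query_range(1, 3) is 5 + 5 + 3.
        if fn is operator.add:
            assert arr.query_range(1, 3) == 13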
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCAmelCase :
def __init__(self ):
_UpperCAmelCase : str = {}
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ):
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_UpperCAmelCase : List[Any] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = []
def snake_case_ (self ):
return list(self.graph )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ):
if s == d:
return []
_UpperCAmelCase : int = []
_UpperCAmelCase : str = []
if s == -2:
_UpperCAmelCase : int = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : int = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def snake_case_ (self , lowerCAmelCase__=-1 ):
if c == -1:
_UpperCAmelCase : Any = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_UpperCAmelCase : Any = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def snake_case_ (self , lowerCAmelCase__=-2 ):
_UpperCAmelCase : Any = deque()
_UpperCAmelCase : str = []
if s == -2:
_UpperCAmelCase : Union[str, Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
_UpperCAmelCase : Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Any = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def snake_case_ (self , lowerCAmelCase__ ):
return len(self.graph[u] )
def snake_case_ (self , lowerCAmelCase__=-2 ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Tuple = []
if s == -2:
_UpperCAmelCase : Any = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = s
_UpperCAmelCase : List[str] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def snake_case_ (self ):
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = -2
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Tuple = s
_UpperCAmelCase : Any = False
_UpperCAmelCase : Tuple = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : List[str] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Dict = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : int = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = s
_UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : int = -2
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Union[str, Any] = s
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : List[str] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : int = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Optional[Any] = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = s
_UpperCAmelCase : List[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def snake_case_ (self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ):
_UpperCAmelCase : Any = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Any = time()
return end - begin
def snake_case_ (self , lowerCAmelCase__=-2 ):
_UpperCAmelCase : Tuple = time()
self.bfs(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = time()
return end - begin
class __lowerCAmelCase :
def __init__(self ):
_UpperCAmelCase : List[str] = {}
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ):
        # check if u exists
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_UpperCAmelCase : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_UpperCAmelCase : str = [[w, u]]
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ):
if s == d:
return []
_UpperCAmelCase : Any = []
_UpperCAmelCase : Optional[int] = []
if s == -2:
_UpperCAmelCase : int = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : Dict = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def snake_case_ (self , lowerCAmelCase__=-1 ):
if c == -1:
_UpperCAmelCase : Optional[int] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_UpperCAmelCase : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def snake_case_ (self , lowerCAmelCase__=-2 ):
_UpperCAmelCase : Tuple = deque()
_UpperCAmelCase : Optional[Any] = []
if s == -2:
_UpperCAmelCase : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
_UpperCAmelCase : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case_ (self , lowerCAmelCase__ ):
return len(self.graph[u] )
def snake_case_ (self ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = -2
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Tuple = s
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : List[Any] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Dict = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Union[str, Any] = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = s
_UpperCAmelCase : List[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Any = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : int = -2
_UpperCAmelCase : int = []
_UpperCAmelCase : Union[str, Any] = s
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Any = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Tuple = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : str = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : List[str] = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = s
_UpperCAmelCase : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def snake_case_ (self ):
return list(self.graph )
def snake_case_ (self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ):
_UpperCAmelCase : int = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = time()
return end - begin
def snake_case_ (self , lowerCAmelCase__=-2 ):
_UpperCAmelCase : Optional[Any] = time()
self.bfs(lowerCAmelCase__ )
_UpperCAmelCase : Any = time()
return end - begin
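
# Note (added): both graph classes above store edges as adjacency lists of
# [weight, vertex] pairs, e.g. self.graph == {0: [[1, 1]]} after
# add_pair(0, 1, 1); the dfs/bfs walks are iterative, driven by an explicit
# stack or deque plus hand-maintained visited and backtracking state.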
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd( n ):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(""" """ , end="""""" )
        for _ in range(0 , i + 1 ):  # printing stars
            print("""* """ , end="""""" )
        print()
# Function to print lower half of diamond (pyramid)
def reverse_floyd( n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print("""* """ , end="""""" )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(""" """ , end="""""" )
# Function to print the full diamond
def pretty_print( n ):
    if n <= 0:
        print(""" ... .... nothing printing :(""" )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
    print(r'''| /\ | |- | |- |--| |\ /| |-''')
    print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
    print('''Good Bye...''')
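
# Sample run (added illustration): pretty_print(3) draws
#   *
#  * *
# * * *
# * * *
#  * *
#   *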
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _a ( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__( self , _UpperCAmelCase = 768 , ) -> Dict:
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _UpperCAmelCase ) )
UpperCamelCase_ = nn.Parameter(torch.ones(1 , _UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , ) -> List[Any]:
UpperCamelCase_ = nn.Parameter(self.mean.to(_UpperCAmelCase ).to(_UpperCAmelCase ) )
UpperCamelCase_ = nn.Parameter(self.std.to(_UpperCAmelCase ).to(_UpperCAmelCase ) )
return self
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict:
UpperCamelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = (embeds * self.std) + self.mean
return embeds
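
# Shape note (added): with the default embedding_dim of 768, the scaling
# method above maps a (batch, 768) tensor to (embeds - self.mean) / self.std
# elementwise, and the unscaling method inverts it, so composing the two
# returns the input up to floating point error.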
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _a ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase_ = bertabert.config.encoder.vocab_size
UpperCamelCase_ = tokenizer.sep_token_id
UpperCamelCase_ = tokenizer.cls_token_id
UpperCamelCase_ = 128
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCamelCase_ = train_dataset.select(range(32 ) )
UpperCamelCase_ = val_dataset.select(range(16 ) )
UpperCamelCase_ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 )
UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 )
UpperCamelCase_ = inputs.input_ids
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = outputs.input_ids
UpperCamelCase_ = outputs.input_ids.copy()
UpperCamelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
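            # -100 is the ignore_index of PyTorch's cross-entropy loss, so the
            # padded label positions above contribute nothing to the loss.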
UpperCamelCase_ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase ):
UpperCamelCase_ = pred.label_ids
UpperCamelCase_ = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCamelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase_ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class _A ( TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[str] = GPTSwaTokenizer
UpperCAmelCase : Dict = False
UpperCAmelCase : List[Any] = True
UpperCAmelCase : int = False
def __snake_case ( self : List[str]):
super().setUp()
# We have a SentencePiece fixture for testing
a : List[str] = GPTSwaTokenizer(__UpperCAmelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>")
tokenizer.save_pretrained(self.tmpdirname)
def __snake_case ( self : Tuple , __UpperCAmelCase : List[str]):
a : Dict = "This is a test"
a : Union[str, Any] = "This is a test"
return input_text, output_text
def __snake_case ( self : Tuple):
a : Optional[Any] = "<s>"
a : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase)
def __snake_case ( self : Optional[Any]):
a : Dict = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<unk>")
self.assertEqual(vocab_keys[1] , "<s>")
self.assertEqual(vocab_keys[-1] , "j")
self.assertEqual(len(__UpperCAmelCase) , 2000)
def __snake_case ( self : Union[str, Any]):
self.assertEqual(self.get_tokenizer().vocab_size , 2000)
def __snake_case ( self : Tuple):
a : List[Any] = GPTSwaTokenizer(__UpperCAmelCase)
a : Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [465, 287, 265, 631, 842])
a : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
# fmt: off
self.assertListEqual(
__UpperCAmelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
a : Union[str, Any] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
self.assertListEqual(
__UpperCAmelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
a : List[str] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
# fmt: off
self.assertListEqual(
__UpperCAmelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."])
# fmt: on
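        # Note (added): the <0x..> pieces above are SentencePiece byte
        # fallback; "é" has no vocabulary entry, so it is emitted as its UTF-8
        # bytes <0xC3><0xA9>, and the digit "9" likewise falls back to <0x39>.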
def __snake_case ( self : Optional[int]):
a : str = GPTSwaTokenizer(__UpperCAmelCase)
a : int = ["This is a test", "I was born in 92000, and this is falsé."]
a : str = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__UpperCAmelCase , __UpperCAmelCase):
self.assertListEqual(tokenizer.encode_fast(__UpperCAmelCase) , __UpperCAmelCase)
# Test that decode_fast returns the input text
for text, token_ids in zip(__UpperCAmelCase , __UpperCAmelCase):
self.assertEqual(tokenizer.decode_fast(__UpperCAmelCase) , __UpperCAmelCase)
@slow
def __snake_case ( self : Union[str, Any]):
a : Union[str, Any] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
a : str = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__UpperCAmelCase , )
"""simple docstring"""
def solution() -> str:
    '''simple docstring'''
    total = 0
    for i in range(1 , 1_001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
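
# Equivalent constant-memory sketch (added): pow with a modulus keeps every
# intermediate below 10**10 while giving the same last ten digits:
#   sum(pow(i, i, 10**10) for i in range(1, 1_001)) % 10**10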
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase__ : Optional[int] = """The dog is cute and lives in the garden house"""
UpperCAmelCase__ : List[Any] = jnp.array([tokenizer.encode(_lowerCAmelCase )] )
UpperCAmelCase__ : str = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : int = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
UpperCAmelCase__ : Optional[Any] = model(_lowerCAmelCase )["""last_hidden_state"""]
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _lowerCAmelCase , atol=1e-3 ) )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class __a ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = 'focalnet'
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=False , snake_case=[192, 384, 768, 768] , snake_case=[2, 2, 6, 2] , snake_case=[2, 2, 2, 2] , snake_case=[3, 3, 3, 3] , snake_case="gelu" , snake_case=4.0 , snake_case=0.0 , snake_case=0.1 , snake_case=False , snake_case=1e-4 , snake_case=False , snake_case=False , snake_case=False , snake_case=0.02 , snake_case=1e-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ):
"""simple docstring"""
super().__init__(**snake_case )
lowerCAmelCase__ : Optional[Any] = image_size
lowerCAmelCase__ : Union[str, Any] = patch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : List[str] = embed_dim
lowerCAmelCase__ : List[Any] = use_conv_embed
lowerCAmelCase__ : List[str] = hidden_sizes
lowerCAmelCase__ : List[Any] = depths
lowerCAmelCase__ : Union[str, Any] = focal_levels
lowerCAmelCase__ : Union[str, Any] = focal_windows
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : List[Any] = mlp_ratio
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = drop_path_rate
lowerCAmelCase__ : Tuple = use_layerscale
lowerCAmelCase__ : Tuple = layerscale_value
lowerCAmelCase__ : str = use_post_layernorm
lowerCAmelCase__ : str = use_post_layernorm_in_modulation
lowerCAmelCase__ : Union[str, Any] = normalize_modulator
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : List[Any] = layer_norm_eps
lowerCAmelCase__ : str = encoder_stride
lowerCAmelCase__ : Union[str, Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
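
# Example (added): with the default depths [2, 2, 6, 2], stage_names becomes
# ["stem", "stage1", "stage2", "stage3", "stage4"], and out_features and
# out_indices are validated and aligned against that list by the helper call
# above.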
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowerCamelCase :Union[str, Any] = logging.getLogger(__name__)
lowerCamelCase :List[str] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowerCamelCase :Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase :
a: Optional[str] = field(
default=__UpperCAmelCase , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
a: Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__UpperCAmelCase )} , )
a: Optional[str] = field(
default=__UpperCAmelCase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
a: Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a: Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a: Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
a: bool = field(
default=__UpperCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
a: str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a: bool = field(
default=__UpperCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def _A ( self: List[Any] ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file) -> Dataset:
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
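# Note (added; an assumption, not stated in this script): each line of the ref file is
# expected to hold a JSON-encoded list of character positions marking sub-word heads,
# which DataCollatorForWholeWordMask consumes via the "chinese_ref" column.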
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", )
            datasets["train"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name." )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False
    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 711
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 346
| 0
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
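# Quick sanity check (illustrative, not part of the original file): one step of the
# vertical BLINKER rotates it into a horizontal bar:
#     new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]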
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 538
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 538
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler", )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """See https://docs.python.org/3/library/pickle.html#pickling-class-instances"""
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 721
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__( self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, sampling_rate: Optional[int] = None, resample: bool = False, mask_audio: bool = False, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 690
| 0
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 26
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an arithmetic expression given in postfix (reverse Polish) notation."""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
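# Worked example (illustrative): ["5", "6", "9", "*", "+"] evaluates to 5 + (6 * 9) = 59.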
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
| 701
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 284
| 0
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , _A : Dict , _A : Tuple=13 , _A : Any=7 , _A : Optional[int]=True , _A : str=True , _A : List[str]=True , _A : int=True , _A : Tuple=99 , _A : List[Any]=32 , _A : List[str]=5 , _A : Union[str, Any]=4 , _A : Dict=4 , _A : Tuple="gelu" , _A : Any=0.0 , _A : List[Any]=0.1 , _A : Any=True , _A : Optional[int]=512 , _A : Optional[int]=16 , _A : Optional[int]=2 , _A : List[str]=0.02 , _A : Tuple=3 , _A : Tuple=4 , _A : Optional[Any]=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : str = seq_length
__SCREAMING_SNAKE_CASE : List[str] = is_training
__SCREAMING_SNAKE_CASE : int = use_input_mask
__SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids
__SCREAMING_SNAKE_CASE : Optional[int] = use_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : Any = num_attention_heads
__SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_multiple_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : int = hidden_dropout
__SCREAMING_SNAKE_CASE : str = attention_dropout
__SCREAMING_SNAKE_CASE : List[str] = weight_tying
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : List[str] = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self : Optional[int] , _A : List[str] , _A : str , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTNeoXJapaneseModel(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : str = model(_A , attention_mask=_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : List[Any] = GPTNeoXJapaneseModel(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Union[str, Any] , _A : Any , _A : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : List[str] , _A : Union[str, Any] , _A : int , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : Any = GPTNeoXJapaneseForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A , use_cache=_A )
__SCREAMING_SNAKE_CASE : Any = outputs.past_key_values
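        # past_key_values caches each layer's key/value tensors from the first pass, so the
        # incremental pass below only has to process the newly appended tokens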
        # create hypothetical multiple next tokens and extend next_input_ids with them
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask accordingly
__SCREAMING_SNAKE_CASE : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE : int = model(_A , attention_mask=_A , output_hidden_states=_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past['''hidden_states'''][0]
__SCREAMING_SNAKE_CASE : Tuple = model(
_A , attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['''hidden_states'''][0]
# select random slice
__SCREAMING_SNAKE_CASE : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that the outputs are equal on the selected slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = config_and_inputs
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCAmelCase_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXJapaneseModelTester(self )
__SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
__SCREAMING_SNAKE_CASE : Tuple = None
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_A , _A , _A )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_A )
@slow
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = '''abeja/gpt-neox-japanese-2.7b'''
__SCREAMING_SNAKE_CASE : List[Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
__SCREAMING_SNAKE_CASE : str = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
__SCREAMING_SNAKE_CASE : List[str] = GPTNeoXJapaneseTokenizer.from_pretrained(_A )
__SCREAMING_SNAKE_CASE : int = GPTNeoXJapaneseForCausalLM.from_pretrained(_A )
__SCREAMING_SNAKE_CASE : Any = []
for prompt in prompts:
__SCREAMING_SNAKE_CASE : str = tokenizer(_A , return_tensors='''pt''' ).input_ids
__SCREAMING_SNAKE_CASE : Any = model.generate(_A , max_length=50 )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_A , skip_special_tokens=_A )
predicted_outputs += generated_string
self.assertListEqual(_A , _A )
| 74
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : int = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 170
| 0
|
def _snake_case ( lowercase__ ):
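    # Reverse the digits of the input and compare the reversal with the original
    # value; negative numbers are never palindromes here.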
if num < 0:
return False
_lowerCamelCase : int = num
_lowerCamelCase : int = 0
while num > 0:
_lowerCamelCase : Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
"""simple docstring"""
from __future__ import annotations
lowercase__ = 10
def _snake_case ( lowercase__ ):
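    # LSD radix sort: distribute the integers into RADIX (= 10) buckets keyed by the
    # digit at the current placement, then read the buckets back in order, advancing
    # from the least to the most significant digit.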
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : Optional[int] = max(lowercase__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCamelCase : list[list] = [[] for _ in range(lowercase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCamelCase : Dict = int((i / placement) % RADIX )
buckets[tmp].append(lowercase__ )
        # put each bucket's contents back into list_of_ints
_lowerCamelCase : str = 0
for b in range(lowercase__ ):
for i in buckets[b]:
_lowerCamelCase : str = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 492
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a__ : int = None
a__ : int = logging.get_logger(__name__)
a__ : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a__ : Union[str, Any] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
a__ : Optional[Any] = {
"google/bigbird-roberta-base": 40_96,
"google/bigbird-roberta-large": 40_96,
"google/bigbird-base-trivia-itc": 40_96,
}
a__ : List[str] = "▁"
class UpperCAmelCase__( __lowercase ):
'''simple docstring'''
A : str = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[str] = BigBirdTokenizer
A : Any = ["input_ids", "attention_mask"]
A : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : Union[str, Any]="<unk>" , lowerCAmelCase : List[Any]="<s>" , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="<pad>" , lowerCAmelCase : Optional[int]="[SEP]" , lowerCAmelCase : List[str]="[MASK]" , lowerCAmelCase : Optional[Any]="[CLS]" , **lowerCAmelCase : Tuple , ) -> List[str]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else bos_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else eos_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else unk_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else pad_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else cls_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else sep_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else mask_token
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
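        # resulting layout -- single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]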
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : int = None , lowerCAmelCase : List[Any] = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase)) + [1]
return [1] + ([0] * len(lowerCAmelCase)) + [1] + ([0] * len(lowerCAmelCase)) + [1]
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCAmelCase ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase):
copyfile(self.vocab_file , lowerCAmelCase)
return (out_vocab_file,)
| 622
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 612
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
a = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708
|
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _SCREAMING_SNAKE_CASE ( snake_case ) -> List[str]:
return 1 / (1 + np.exp(-z ))
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Tuple:
return (-y * np.log(snake_case ) - (1 - y) * np.log(1 - h )).mean()
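# The cost above is the mean binary cross-entropy, J(theta) = -mean(y*log(h) + (1 - y)*log(1 - h))
# with h = sigmoid(X @ theta); the helper below computes the (unnormalized) log-likelihood instead.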
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = np.dot(snake_case , snake_case )
return np.sum(y * scores - np.log(1 + np.exp(snake_case ) ) )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case=7_0_0_0_0 ) -> Tuple:
_UpperCAmelCase = np.zeros(x.shape[1] )
for iterations in range(snake_case ):
_UpperCAmelCase = np.dot(snake_case , snake_case )
_UpperCAmelCase = sigmoid_function(snake_case )
_UpperCAmelCase = np.dot(x.T , h - y ) / y.size
_UpperCAmelCase = theta - alpha * gradient # updating the weights
_UpperCAmelCase = np.dot(snake_case , snake_case )
_UpperCAmelCase = sigmoid_function(snake_case )
_UpperCAmelCase = cost_function(snake_case , snake_case )
if iterations % 1_0_0 == 0:
print(f"loss: {j} \t" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a = datasets.load_iris()
a = iris.data[:, :2]
a = (iris.target != 0) * 1
a = 0.1
a = logistic_reg(alpha, x, y, max_iterations=70000)
print("theta: ", theta) # printing the theta i.e our weights vector
def _SCREAMING_SNAKE_CASE ( snake_case ) -> List[Any]:
return sigmoid_function(
        np.dot(snake_case , snake_case ) ) # predict the probability of class 1 from the learned weights
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((a) , (a)) = (x[:, 0].min(), x[:, 0].max())
((a) , (a)) = (x[:, 1].min(), x[:, 1].max())
((a) , (a)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a = np.c_[xxa.ravel(), xxa.ravel()]
a = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 175
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE_ = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
__lowerCAmelCase = """albert"""
def __init__( self , snake_case_=3_0000 , snake_case_=128 , snake_case_=4096 , snake_case_=12 , snake_case_=1 , snake_case_=64 , snake_case_=1_6384 , snake_case_=1 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=0 , snake_case_=512 , snake_case_=2 , snake_case_=0.0_2 , snake_case_=1e-1_2 , snake_case_=0.1 , snake_case_="absolute" , snake_case_=0 , snake_case_=2 , snake_case_=3 , **snake_case_ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
__UpperCAmelCase: str = vocab_size
__UpperCAmelCase: Optional[Any] = embedding_size
__UpperCAmelCase: List[str] = hidden_size
__UpperCAmelCase: Any = num_hidden_layers
__UpperCAmelCase: int = num_hidden_groups
__UpperCAmelCase: List[Any] = num_attention_heads
__UpperCAmelCase: Dict = inner_group_num
__UpperCAmelCase: Optional[int] = hidden_act
__UpperCAmelCase: int = intermediate_size
__UpperCAmelCase: Optional[int] = hidden_dropout_prob
__UpperCAmelCase: int = attention_probs_dropout_prob
__UpperCAmelCase: Any = max_position_embeddings
__UpperCAmelCase: Union[str, Any] = type_vocab_size
__UpperCAmelCase: Union[str, Any] = initializer_range
__UpperCAmelCase: Dict = layer_norm_eps
__UpperCAmelCase: List[str] = classifier_dropout_prob
__UpperCAmelCase: List[Any] = position_embedding_type
class a ( __lowerCAmelCase ):
"""simple docstring"""
@property
def lowercase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCAmelCase: List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCAmelCase: List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 523
|
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
def UpperCamelCase__ ( _lowercase : Any=2 , _lowercase : str=3 , _lowercase : List[str]=1_6 , _lowercase : int = 1_0 , _lowercase : int = 2 ) -> str:
def get_dataset(_lowercase : Optional[Any] ):
__UpperCAmelCase: List[str] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCAmelCase: Tuple = get_dataset(_lowercase )
__UpperCAmelCase: Dict = get_dataset(_lowercase )
__UpperCAmelCase: Dict = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
__UpperCAmelCase: Tuple = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
return (train_dataloader, valid_dataloader)
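# The loaders above wrap a synthetic linear-regression task (y = a*x + b + noise), which is
# enough to exercise model/optimizer/scheduler state round-trips without real data.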
def UpperCamelCase__ ( _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : int=None ) -> Optional[int]:
__UpperCAmelCase: Optional[int] = []
for epoch in range(_lowercase ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCAmelCase, __UpperCAmelCase: Tuple = batch
__UpperCAmelCase: List[str] = model(_lowercase )
__UpperCAmelCase: List[Any] = torch.nn.functional.mse_loss(_lowercase , _lowercase )
accelerator.backward(_lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class a ( nn.Module ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: List[Any] = nn.Parameter(torch.randn(1 ) )
__UpperCAmelCase: List[Any] = nn.Parameter(torch.randn(1 ) )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
return x * self.a + self.b
class a ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: List[Any] = DummyModel()
__UpperCAmelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = dummy_dataloaders()
__UpperCAmelCase: Dict = ProjectConfiguration(total_limit=1 , project_dir=snake_case_ , automatic_checkpoint_naming=snake_case_ )
# Train baseline
__UpperCAmelCase: Union[str, Any] = Accelerator(project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Tuple = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: Optional[int] = DummyModel()
__UpperCAmelCase: List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: int = dummy_dataloaders()
# Train baseline
__UpperCAmelCase: Union[str, Any] = Accelerator()
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Dict = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
__UpperCAmelCase: int = os.path.join(snake_case_ , """initial""" )
accelerator.save_state(snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
__UpperCAmelCase: Union[str, Any] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): Tuple = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase: str = DummyModel()
__UpperCAmelCase: str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = dummy_dataloaders()
__UpperCAmelCase: Optional[Any] = Accelerator()
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Optional[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): Any = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
__UpperCAmelCase: Union[str, Any] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
__UpperCAmelCase: Optional[int] = os.path.join(snake_case_ , """checkpoint""" )
accelerator.save_state(snake_case_ )
# Load everything back in and make sure all states work
accelerator.load_state(snake_case_ )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): str = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: List[Any] = DummyModel()
__UpperCAmelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Tuple = dummy_dataloaders()
__UpperCAmelCase: Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
__UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
((__UpperCAmelCase), (__UpperCAmelCase)): Optional[int] = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
__UpperCAmelCase: List[Any] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase: str = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase: List[str] = DummyModel()
__UpperCAmelCase: Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Dict = dummy_dataloaders()
__UpperCAmelCase: Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case_ )
__UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) )
((__UpperCAmelCase), (__UpperCAmelCase)): Union[str, Any] = model.a.item(), model.b.item()
__UpperCAmelCase: Optional[Any] = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
__UpperCAmelCase: List[str] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): str = model.a.item(), model.b.item()
__UpperCAmelCase: Tuple = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = torch.tensor([1, 2, 3] )
__UpperCAmelCase: Tuple = torch.tensor([2, 3, 4] )
__UpperCAmelCase: List[str] = DummyModel()
__UpperCAmelCase: int = torch.optim.Adam(net.parameters() )
__UpperCAmelCase: str = Accelerator()
with self.assertRaises(snake_case_ ) as ve:
accelerator.register_for_checkpointing(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase: Tuple = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: List[str] = DummyModel()
__UpperCAmelCase: int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase: Optional[int] = torch.optim.lr_scheduler.StepLR(snake_case_ , step_size=1 , gamma=0.9_9 )
__UpperCAmelCase, __UpperCAmelCase: Any = dummy_dataloaders()
__UpperCAmelCase: Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
__UpperCAmelCase: List[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
__UpperCAmelCase: Union[str, Any] = scheduler.state_dict()
train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(snake_case_ , scheduler.state_dict() )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: Optional[int] = DummyModel()
__UpperCAmelCase: Tuple = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ , total_limit=2 )
# Train baseline
__UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase: List[Any] = accelerator.prepare(snake_case_ )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = '/tmp/accelerate/state_checkpointing'
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer state is loaded on the GPU
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE_ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
SCREAMING_SNAKE_CASE_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE_ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE_ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 523
| 1
|
import qiskit
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 2 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ = qiskit.QuantumCircuit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # Adding an H gate on qubit 0 (puts q0 into superposition)
circuit.h(0 )
for i in range(1 , _SCREAMING_SNAKE_CASE ):
        # Adding a CX (CNOT) gate
circuit.cx(i - 1 , _SCREAMING_SNAKE_CASE )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(_SCREAMING_SNAKE_CASE ) ) , list(range(_SCREAMING_SNAKE_CASE ) ) )
    # Measuring any one qubit now collapses the superposition of the others,
    # leaving them in the same state as the measured qubit.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ = qiskit.execute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shots=1_000 )
return job.result().get_counts(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
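# Rough sanity check (assuming qiskit and the Aer simulator are installed): a 3-qubit GHZ
# state collapses to all zeros or all ones, so the printed counts should be approximately
# {'000': 500, '111': 500} over the 1000 shots used above.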
| 720
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __snake_case ( lowerCAmelCase__ ):
__lowerCAmelCase : Optional[int] = 'dpr'
def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1E-12 , _A=0 , _A="absolute" , _A = 0 , **_A , ):
super().__init__(pad_token_id=_A , **_A)
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = position_embedding_type
| 620
| 0
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCamelCase = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
UpperCamelCase = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
UpperCamelCase = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
return float((preds == labels).mean() )
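# e.g. simple_accuracy on arrays preds=[0, 1, 1] and labels=[0, 1, 0] returns 2/3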
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Optional[int] = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : int = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : Optional[Any] = float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
_lowercase : int = float(spearmanr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def __a ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCAmelCase , _lowerCAmelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCAmelCase , _lowerCAmelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCAmelCase , _lowerCAmelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 66
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A__ : List[str] =logging.getLogger(__name__)
def UpperCamelCase__ ( ):
"""simple docstring"""
_lowerCAmelCase = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=lowerCAmelCase , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=lowerCAmelCase , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=lowerCAmelCase , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=lowerCAmelCase , default="""data/dump""" , help="""The dump file prefix.""" )
_lowerCAmelCase = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
_lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
_lowerCAmelCase = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
_lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
_lowerCAmelCase = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
_lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
_lowerCAmelCase = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
_lowerCAmelCase = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f"{len(lowerCAmelCase )} examples to process." )
_lowerCAmelCase = []
_lowerCAmelCase = 0
_lowerCAmelCase = 1_00_00
_lowerCAmelCase = time.time()
for text in data:
_lowerCAmelCase = f"{bos} {text.strip()} {sep}"
_lowerCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
rslt.append(lowerCAmelCase )
iter += 1
if iter % interval == 0:
_lowerCAmelCase = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
_lowerCAmelCase = time.time()
logger.info("""Finished binarization""" )
logger.info(f"{len(lowerCAmelCase )} examples processed." )
_lowerCAmelCase = f"{args.dump_file}.{args.tokenizer_name}.pickle"
_lowerCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
_lowerCAmelCase = [np.uintaa(lowerCAmelCase ) for d in rslt]
else:
_lowerCAmelCase = [np.intaa(lowerCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(lowerCAmelCase , """wb""" ) as handle:
pickle.dump(rslt_ , lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 207
| 0
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowercase_ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowercase_ = logging.getLogger()
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
__snake_case : Any = parser.parse_args()
return args.f
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple="eval" ):
'''simple docstring'''
__snake_case : str = os.path.join(__SCREAMING_SNAKE_CASE , F'''{split}_results.json''' )
if os.path.exists(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
return json.load(__SCREAMING_SNAKE_CASE )
raise ValueError(F'''can\'t find {path}''' )
lowercase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE__ ( __a ):
def snake_case__ ( self : Dict ):
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
__snake_case : List[Any] = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_flax_glue.main()
__snake_case : List[str] = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_clm_flax.main()
__snake_case : str = get_results(A__ )
self.assertLess(result["""eval_perplexity"""] , 1_00 )
@slow
def snake_case__ ( self : List[str] ):
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : Optional[Any] = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_summarization_flax.main()
__snake_case : Optional[Any] = get_results(A__ , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def snake_case__ ( self : Optional[int] ):
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_mlm_flax.main()
__snake_case : List[str] = get_results(A__ )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def snake_case__ ( self : Tuple ):
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_ta_mlm_flax.main()
__snake_case : Dict = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
def snake_case__ ( self : Tuple ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__snake_case : Tuple = 7 if get_gpu_count() > 1 else 2
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
__snake_case : Any = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_flax_ner.main()
__snake_case : Union[str, Any] = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def snake_case__ ( self : Tuple ):
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : List[Any] = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_qa.main()
__snake_case : str = get_results(A__ )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 700
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowercase_ = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowercase_ = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowercase_ = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 390
| 0
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
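# The memoized variant below caches the count for each sub-target in dp_array,
# cutting the plain exponential recursion above down to O(n * target) time.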
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
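# The bottom-up variant avoids recursion entirely: dp_array[i] holds the number
# of ordered combinations of elements of `array` that sum to i.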
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 270
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Runs `check_program` in a separate process with a hard wall-clock timeout."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
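# unsafe_execute runs the candidate program inside a throwaway working directory,
# with stdin/stdout/stderr swallowed, a SIGALRM-based time limit, and destructive
# os/shutil/subprocess functions disabled by reliability_guard().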
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
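# Usage note: `with time_limit(3.0): ...` raises TimeoutException once the
# wall-clock budget elapses. SIGALRM-based timers only work on the main thread
# of a Unix process, which is why execution happens in a dedicated subprocess.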
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). This is not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 270
| 1
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
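# Example (illustrative): is_torch_version(">=", "1.12") is True when the
# installed torch satisfies the comparison; compare_versions("numpy", "<", "2.0")
# works the same way for any installed package name.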
| 657
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
# Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
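# Illustrative run (my example, not from the original file): with
# profit=[60, 100, 120], weight=[10, 20, 30] and max_weight=50, the greedy
# fractional choice takes items 1 and 2 whole plus 2/3 of item 3, giving 240.0.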
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 657
| 1
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
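    # var_map rewrites PyTorch state-dict keys into the TF1 checkpoint naming
    # scheme, e.g. "encoder.layer.0.LayerNorm.weight" becomes
    # "encoder/layer_0/LayerNorm/gamma" (prefixed with "bert/" below).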
if not os.path.isdir(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 47
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs) -> GPTSanJapaneseTokenizer:
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        ids = tokenizer.encode(input_text)
        decoded = tokenizer.decode(ids)
        self.assertEqual(decoded, expected_text)
    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。こんばんは、世界。😀'
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode('', prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        x_string_1 = tokenizer.decode(x_token_1)
        x_string_2 = tokenizer.decode(x_token_2)
        x_string_3 = tokenizer.decode(x_token_3)
        self.assertEqual(x_string_1, output_text)
        self.assertEqual(x_string_2, output_text)
        self.assertEqual(x_string_3, output_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        x_token_1 = tokenizer.encode('あンいワ')
        x_token_2 = tokenizer.encode('', prefix_text='あンいワ')
        x_token_3 = tokenizer.encode('いワ', prefix_text='あン')
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        pass

    def test_padding_different_model_input_name(self):
        pass
| 42
| 0
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
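# Both inputs are L2-normalized first, so the matrix product yields pairwise
# cosine similarities between image embeddings and concept embeddings.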
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res['bad_concepts']) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 712
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
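    # Each schedule entry interpolates geometrically between sigma_max**2 and
    # sigma_min**2: schedule[i] = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)).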
    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
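    # step() below is the first-order (Euler) update from Karras et al. (2022):
    # it moves sample_hat from noise level sigma_hat to sigma_prev along the
    # estimated derivative; step_correct() then applies the second-order correction.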
    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 610
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91
|
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
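# Example: twin_prime(5) returns 7 because 5 and 7 are both prime,
# while twin_prime(4) returns -1.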
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91
| 1
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n    >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n    >>> print(results)\n    {\'spearmanr\': -0.7}\n\n    Example 2:\n    >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n    ...                                    predictions=[10, 9, 2.5, 6, 4],\n    ...                                    return_pvalue=True)\n    >>> print(results[\'spearmanr\'])\n    -0.7\n    >>> print(round(results[\'spearmanr_pvalue\'], 2))\n    0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 704
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 406
| 0
|
"""simple docstring"""
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []
    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                # Last characters match: no edit needed for them.
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                # Otherwise take 1 + min(insert, delete, replace).
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
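    # Worked example: the minimum edit distance between "kitten" and "sitting"
    # is 3 (substitute k->s, substitute e->i, insert g).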
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 196
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> List[str]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ) -> Dict:
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self ) -> List[str]:
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
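        # Editor's note: ``device_map="auto"`` relies on `accelerate` to place the
        # fp16 weights, which is why ``pixel_values`` is moved to ``torch_device``
        # explicitly; the forward pass only verifies that half-precision inference
        # runs without raising.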
| 196
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
    """simple docstring"""
    n = SCREAMING_SNAKE_CASE
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
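# A minimal usage sketch (editor's addition, assuming the fixed trial-division
# factorization above): 360 = 2 * 2 * 2 * 3 * 3 * 5.
if __name__ == "__main__":
    assert lowerCAmelCase_(360 ) == [2, 2, 2, 3, 3, 5]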
| 709
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class TimmBackboneConfig( PretrainedConfig ):
    model_type = """timm_backbone"""
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ) -> Dict:
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
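# A hedged usage sketch (editor's addition; values are illustrative):
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
# config.features_only  # -> True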
| 20
| 0
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A = get_tests_dir("""fixtures""")
class ImageProcessorUtilTester( unittest.TestCase ):
def a_ ( self : Optional[Any]):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        image_processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock) as mock_head:
            image_processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check makes sure we did call the fake head request
mock_head.assert_called()
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")
def a_ ( self : int):
"""simple docstring"""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor")
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester( unittest.TestCase ):
@classmethod
    def setUpClass( cls ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = TOKEN
HfFolder.save_token(UpperCamelCase_)
@classmethod
    def tearDownClass( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor")
except HTTPError:
pass
    def test_push_to_hub( self ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(A)
        image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor")
# Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="test-image-processor" , push_to_hub=True , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))
    def test_push_to_hub_in_organization( self ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(A)
        image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="valid_org/test-image-processor-org" , push_to_hub=True , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))
    def test_push_to_hub_dynamic_image_processor( self ):
        """simple docstring"""
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(A)
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            F"{USER}/test-dynamic-image-processor" , trust_remote_code=True)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor")
| 77
|
"""simple docstring"""
def jaccard_similarity( set_a , set_b , alternative_union=False ) -> Optional[Any]:
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {"""a""", """b""", """c""", """d""", """e"""}
    set_b = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
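    # Editor's sketch: with alternative_union=True the denominator is
    # len(set_a) + len(set_b) instead of the size of the true union, so the
    # score for these sets is 3 / 11 rather than 3 / 8.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))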
| 361
| 0
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    '''simple docstring'''
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(f'Saving model to {output_model_file}' )
                torch.save(state_dict , output_model_file )
                logger.info(f'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(f'Saving model to {output_model_file}' )
            torch.save(state_dict , output_model_file )
            logger.info(f'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , f'{MODEL_NAME}_{model_index}' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'Saving model to {ckpt_dir}' )
            state_dict = {"""model""": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'Model saved to {ckpt_dir}' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
                        """initializing FSDP object""" )
                return
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'Loading model from {input_model_file}' )
            state_dict = torch.load(input_model_file )
            logger.info(f'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'Loading model from {input_model_file}' )
            state_dict = torch.load(input_model_file )
            logger.info(f'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , f'{MODEL_NAME}_{model_index}' )
                if f'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading model from {ckpt_dir}' )
            state_dict = {"""model""": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict["""model"""]
            logger.info(f'Model loaded from {ckpt_dir}' )
        model.load_state_dict(state_dict )
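# Editor's note: the three branches above mirror the three FSDP checkpoint layouts --
# FULL_STATE_DICT (one consolidated .bin file), LOCAL_STATE_DICT (one shard per rank)
# and SHARDED_STATE_DICT (a dist_cp checkpoint directory) -- and loading must use the
# same `state_dict_type` that was used when saving.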
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
    '''simple docstring'''
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
                torch.save(optim_state , output_optimizer_file )
                logger.info(f'Optimizer state saved in {output_optimizer_file}' )
        else:
            ckpt_dir = os.path.join(output_dir , f'{OPTIMIZER_NAME}_{optimizer_index}' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'Saving Optimizer state to {ckpt_dir}' )
            dist_cp.save_state_dict(
                state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'Optimizer state saved in {ckpt_dir}' )
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
            optim_state = torch.load(input_optimizer_file )
            logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
        else:
            ckpt_dir = (
                os.path.join(input_dir , f'{OPTIMIZER_NAME}_{optimizer_index}' )
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}' )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state["""optimizer"""]
            logger.info(f'Optimizer loaded from {ckpt_dir}' )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
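# Editor's note: ``FSDP.optim_state_dict_to_load`` re-keys the loaded optimizer state
# to match the flattened parameters of the wrapped model, so it must run only after
# the corresponding FSDP model has been restored.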
| 80
|
def solution( n : int = 2000000 ):
    '''simple docstring'''
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
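# A small sanity sketch (editor's addition, assuming the corrected sieve above):
# the primes below 10 are 2, 3, 5 and 7, which sum to 17.
if __name__ == "__main__":
    assert solution(10) == 17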
| 80
| 1
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check makes sure we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowercase ( self : Dict ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        tokenizer = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            tokenizer = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check makes sure we did call the fake head request
mock_head.assert_called()
def _lowercase ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file , "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , f )
            tokenizer = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
            with open("tokenizer.json" , "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , f )
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def _lowercase ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : int = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class TokenizerPushToHubTester( unittest.TestCase ):
"""simple docstring"""
a_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass( cls ):
snake_case__ : Union[str, Any] = TOKEN
HfFolder.save_token(__A )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
snake_case__ : Any = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir , repo_id="test-tokenizer" , push_to_hub=True , use_auth_token=self._token )
snake_case__ : Any = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
    def test_push_to_hub_in_organization( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
snake_case__ : Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir , repo_id="valid_org/test-tokenizer-org" , push_to_hub=True , use_auth_token=self._token )
snake_case__ : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer( self ):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
        tokenizer = AutoTokenizer.from_pretrained(
            f'''{USER}/test-dynamic-tokenizer''' , use_fast=False , trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class TrieTest( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ):
        trie = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def _lowercase ( self : Union[str, Any] ):
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def _lowercase ( self : Union[str, Any] ):
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def _lowercase ( self : List[str] ):
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : Dict ):
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : List[str] ):
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def _lowercase ( self : Optional[Any] ):
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def _lowercase ( self : Optional[int] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ["AB", "C"] )
| 297
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
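# Editor's illustration (hedged): a key such as "encoder.proj" matches by plain
# substring, "text_encoder_prenet.*" matches any checkpoint name starting with that
# prefix, and "encoder.layers.*.norm_k.weight" matches when both the prefix and the
# suffix around ".*." occur in the name.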
def recursively_load_weights( fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(".*." )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["model"] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
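# Example invocation (editor's sketch; the paths are hypothetical):
#   python convert_speecht5_checkpoint.py --task s2t \
#       --checkpoint_path ./speecht5_asr.pt --pytorch_dump_folder_path ./speecht5_s2t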
| 297
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module( module ):
    for param in module.parameters():
        param.requires_grad = False
def get_device( ):
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_pil( img ):
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp( ):
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
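# A hedged usage sketch (editor's addition, using the de-obfuscated helper names above):
# device = get_device()          # "cuda", "mps" (with a warning) or "cpu"
# freeze_module(model)           # disable gradients for every parameter of a module
# print(get_timestamp())         # e.g. "14:03:59"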
| 428
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ) -> str:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        model = TFRoFormerModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs_as_list = [input_ids, input_mask]
        result = model(inputs )
        result = model(inputs_as_list )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        prediction_scores = model(inputs )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Any:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp( self ) -> List[Any]:
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config( self ) -> Dict:
        self.config_tester.run_common_tests()
    def test_model( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_causal_lm( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Optional[Any]:
        model = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ) -> List[str]:
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
# TODO Replace vocab size
lowercase__ : str = 50000
lowercase__ : List[Any] = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase__ : Union[str, Any] = tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
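# The expected tensors above encode the rotary position embedding rule
# q' = q * cos + rotate_half(q) * sin (and likewise for k). A minimal NumPy
# sketch of that rule; rotate_half/apply_rotary are illustrative names, not
# the transformers API.
import numpy as np

def rotate_half(x: np.ndarray) -> np.ndarray:
    # Interleave (-x_odd, x_even) pairs along the last axis.
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    return np.stack([-x_odd, x_even], axis=-1).reshape(x.shape)

def apply_rotary(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # sin/cos hold one value per rotated pair; duplicate them to full width.
    sin = np.repeat(sin, 2, axis=-1)
    cos = np.repeat(cos, 2, axis=-1)
    return x * cos + rotate_half(x) * sin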
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
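# Sanity check on a small bound (assuming the solution() function above):
# the primes below 10 are 2, 3, 5 and 7.
assert solution(10) == 2 + 3 + 5 + 7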
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a DetrConfig from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
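# A minimal usage sketch for the config above: build a small DetrConfig and
# round-trip it through to_dict()/from_dict(); the sizes are illustrative
# assumptions, not recommended settings.
if __name__ == "__main__":
    config = DetrConfig(num_queries=50, d_model=128, encoder_layers=2, decoder_layers=2)
    restored = DetrConfig.from_dict(config.to_dict())
    print(restored.num_queries, restored.hidden_size)  # 50 128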
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text using pseudo-random numbers."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt text using pseudo-random numbers."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
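# Decryption recovers the plaintext exactly because each code point i is
# encrypted as c = (i + k) * k, so (c - k**2) / k = i. A round-trip sketch
# (assuming the Onepad class above):
def _round_trip(text: str = "Hello") -> bool:
    cipher, key = Onepad.encrypt(text)
    return Onepad.decrypt(cipher, key) == text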
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
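# A short usage sketch (assuming the classes above): aligning the template
# with a dataset's features swaps the generic ClassLabel for the concrete one.
if __name__ == "__main__":
    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    aligned = TextClassification().align_with_features(features)
    print(aligned.label_schema["labels"].names)  # ['neg', 'pos']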
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
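# The pattern above (repeated in several __init__ files in this collection)
# defers heavy imports until an attribute is first accessed. A stripped-down
# sketch of the same idea (hypothetical names, not the transformers
# implementation):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the submodule only when one of its names is requested.
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)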
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_exact_duplicates(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
from __future__ import annotations
def depth_first_search(possible_board: list[int], diagonal_right_collisions: list[int], diagonal_left_collisions: list[int], boards: list[list[str]], n: int) -> None:
    """Apply depth-first search to find all possible boards with n queens."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
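# For n = 4 the search prints the two known solutions, columns (1, 3, 0, 2)
# and the mirror (2, 0, 3, 1). A direct check of the collision rules on the
# first one (independent of the functions above):
_solution = [1, 3, 0, 2]
assert len(set(_solution)) == 4  # no shared column
assert len({row - col for row, col in enumerate(_solution)}) == 4  # no shared 45 degree diagonal
assert len({row + col for row, col in enumerate(_solution)}) == 4  # no shared 135 degree diagonal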
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph (an adjacency dict) contains a cycle."""
    # Keep track of visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
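# Two small adjacency dicts as a quick illustration (assuming check_cycle above):
# 0 -> 1 -> 2 -> 0 contains a back edge, while the plain chain does not.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False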
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local(self):
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
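# A worked example (assuming solve() above): for the postfix expression
# "4 5 + 2 *", the stack holds 4 and 5, "+" replaces them with 9, and "*"
# multiplies by 2:
#
#     >>> solve("4 5 + 2 *".split(" "))
#     18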
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int) -> list[float]:
    """Jacobi Iteration Method: iteratively solves a strictly diagonally dominant system of linear equations."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Checks whether the augmented matrix is strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
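# A small usage sketch (assuming the functions above): the system
# 4x + y = 1, x + 3y = 2 is strictly diagonally dominant, so the sweeps
# converge towards x = 1/11 and y = 7/11:
#
#     coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#     constant = np.array([[1.0], [2.0]])
#     print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))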
from __future__ import annotations
def decrypt_caesar_with_chi_squared(ciphertext: str, cipher_alphabet: list[str] | None = None, frequencies_dict: dict[str, float] | None = None, case_sensitive: bool = False) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
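# A short usage sketch (assuming the function above): "khoor" is "hello"
# Caesar-shifted by 3; on longer ciphertexts the lowest chi-squared shift is
# reliably the true one.
if __name__ == "__main__":
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor")
    print(shift, round(chi_squared, 3), decoded)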
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements of the array."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
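# The sliding window updates the running sum in O(1) per step, so the whole
# scan is O(n) rather than the naive O(n * k). A fixed example (assuming
# max_sum_in_array above): the best window of length 4 is [3, 1, 0, 20].
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24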
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers=7, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, encoder_stride=2, num_attention_outputs=1, embed_dim=128, depths=[2, 2, 2, 2], resolution=2, dim=2, mlp_expansion_ratio=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size])

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size]
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , "seq_length" , None )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , seq_len )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        chunk_length = getattr(self.model_tester , "chunk_length" , None )
        if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.return_dict = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model( self ):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs )
            self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_image_classification_head_with_teacher( self ):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 276
| 0
|
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital( a_ ):
    '''simple docstring'''
    candidate_str = str(a_ )
    return len(candidate_str ) == 9 and set(candidate_str ) == set('123456789' )
def solution():
    '''simple docstring'''
    for base_num in range(9999, 4999, -1 ):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333, 99, -1 ):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
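# Sanity checks for the helpers above (an added illustration, not part of the
# original script): 932718654 is the concatenation of 9327 and 2 * 9327 = 18654,
# which is exactly what the 10_0002 multiplier produces for a 4-digit base, and
# it uses each of the digits 1-9 exactly once.
assert is_9_pandigital(932718654 )
assert not is_9_pandigital(123456780 )  # contains a zero, so it cannot be 1-9 pandigital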
if __name__ == "__main__":
print(F"""{solution() = }""")
| 133
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny( self ):
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose( self ):
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 133
| 1
|
from __future__ import annotations
def encode( plain : str ) -> list[int]:
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded : list[int] ) -> str:
    return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded:' , decode(encoded ) )
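# A quick illustration of the a1z26-style mapping implemented above: each
# lowercase letter maps to its position in the alphabet via ord(char) - 96,
# and decode() is the exact inverse of encode().
assert encode("abcz" ) == [1, 2, 3, 26]
assert decode([8, 5, 12, 12, 15] ) == "hello"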
if __name__ == "__main__":
main()
| 40
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_DOCS = '''docs/source/en'''
REPO_PATH = '''.'''
def _find_text_in_file( filename, start_prompt, end_prompt ):
    with open(filename, "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_snake_case = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier ):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier )
    return [m.group(0 ) for m in matches]
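# Two examples of what the splitter above yields (leading acronyms stay together
# thanks to the second lookahead in the regex):
#   camel_case_split("TFBertModel")       -> ["TF", "Bert", "Model"]
#   camel_case_split("GPTNeoForCausalLM") -> ["GPT", "Neo", "For", "Causal", "LM"]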
def _center_text( text, width ):
    text_length = 2 if text == "✅" or text == "❌" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("Tokenizer" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table( overwrite=False ):
    current_table , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md" ), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md" ), "w", encoding="utf-8", newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
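# Typical invocations, run from the root of the repo as the header comment notes:
#   python utils/check_table.py                      # raise if docs/source/en/index.md is stale
#   python utils/check_table.py --fix_and_overwrite  # rewrite the model table in place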
| 382
| 0
|
from __future__ import annotations
def is_palindrome( A__ : int | str ) -> bool:
    """simple docstring"""
    n = str(A__ )
    return n == n[::-1]
def solution( A__ : int = 1000000 ) -> int:
    """simple docstring"""
    total = 0
    for i in range(1 , A__ ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
            total += i
    return total
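# A worked example for the double-base check above: 585 is a decimal palindrome,
# and bin(585) == "0b1001001001", whose digit string "1001001001" is also a
# palindrome, so 585 is one of the numbers counted by solution().
assert is_palindrome(585 ) and is_palindrome(bin(585 ).split('b' )[1] )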
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 380
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
    def test_base_case( self ) -> None:
        '''simple docstring'''
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
    def test_easy_case( self ) -> None:
        '''simple docstring'''
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )
    def test_knapsack( self ) -> None:
        '''simple docstring'''
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
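# For reference, a minimal recursive 0/1 knapsack matching the call signature
# exercised above -- knapsack(capacity, weights, values, counter) -- could look
# like the sketch below. This is an illustration inferred from the tests, not a
# copy of the imported module's implementation.
def _reference_knapsack(capacity , weights , values , counter ):
    # no items left or no remaining capacity
    if counter == 0 or capacity == 0:
        return 0
    # the last item is too heavy to include
    if weights[counter - 1] > capacity:
        return _reference_knapsack(capacity , weights , values , counter - 1 )
    # otherwise take the better of including or excluding the last item
    return max(
        values[counter - 1] + _reference_knapsack(capacity - weights[counter - 1] , weights , values , counter - 1 ),
        _reference_knapsack(capacity , weights , values , counter - 1 ),
    )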
if __name__ == "__main__":
unittest.main()
| 380
| 1
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline (Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
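# A minimal usage sketch for the pipeline defined above (the checkpoint name is
# illustrative; any image-to-text model such as a GIT, Pix2Struct or
# vision-encoder-decoder checkpoint should work):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{'generated_text': '...'}]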
| 50
|
'''simple docstring'''
def set_bit( number : int , position : int ) -> int:
    return number | (1 << position)
def clear_bit( number : int , position : int ) -> int:
    return number & ~(1 << position)
def flip_bit( number : int , position : int ) -> int:
    return number ^ (1 << position)
def is_bit_set( number : int , position : int ) -> bool:
    return ((number >> position) & 1) == 1
def get_bit( number : int , position : int ) -> int:
    return int((number & (1 << position)) != 0 )
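# Quick usage examples for the helpers above (positions are 0-indexed from the
# least significant bit):
#   set_bit(0b1101, 1)    -> 15 (0b1111)
#   clear_bit(0b1101, 2)  -> 9  (0b1001)
#   flip_bit(0b1101, 0)   -> 12 (0b1100)
#   is_bit_set(0b1101, 1) -> False
#   get_bit(0b1101, 3)    -> 1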
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694
| 0
|
"""simple docstring"""
import sys
import turtle
def get_mid( pa , pb ):
    '''simple docstring'''
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle( vertexa , vertexb , vertexc , depth , ):
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
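# Each recursion level replaces a triangle with three half-scale copies, so a
# depth of d ends up drawing 3**d smallest triangles; depth 5 already means
# 243 of them, which is why the script takes the depth as its single argument.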
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 616
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> None:
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ) -> None:
        self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
                    config.problem_type = problem_type["""title"""]
                    config.num_labels = problem_type["""num_labels"""]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["""labels"""] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest (unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16( self ):
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 616
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput ( BaseOutput ):
    '''simple docstring'''
    sample : torch.FloatTensor
class TransformerTemporalModel ( ModelMixin , ConfigMixin ):
    '''simple docstring'''
@register_to_config
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , out_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , activation_fn = "geglu" , norm_elementwise_affine = True , double_self_attention = True , ) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ) -> TransformerTemporalModelOutput:
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , channel , num_frames )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
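# Shape walk-through for the forward pass above, assuming hidden_states arrives
# as (batch * num_frames, channel, height, width):
#   reshape -> (batch, num_frames, channel, height, width)
#   permute -> (batch, channel, num_frames, height, width) for the GroupNorm
#   reshape -> (batch * height * width, num_frames, channel), so each spatial
#              location attends over the temporal axis independently
# and the result is reshaped back to (batch * num_frames, channel, height, width)
# before the residual addition.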
| 568
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests ( unittest.TestCase ):
"""simple docstring"""
    def test_all_is_compatible( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible( self ) -> None:
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_not_compatible( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible( self ) -> None:
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_not_compatible( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant( self ) -> None:
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant_partial( self ) -> None:
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_not_compatible_variant( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant( self ) -> None:
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant_partial( self ) -> None:
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_not_compatible_variant( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
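# The rule these tests pin down: a checkpoint layout is safetensors-compatible
# when every PyTorch weight file has a safetensors counterpart. A rough sketch
# of the non-variant part of that check is below; it assumes the simple naming
# scheme seen in the lists above ("pytorch_model" pairs with "model", diffusers
# weights keep their stem) and ignores the fp16-variant handling of the real helper.
def _sketch_is_safetensors_compatible(filenames ):
    for name in filenames:
        if not name.endswith('.bin' ):
            continue
        stem = name[: -len('.bin' )]
        # transformers components rename "pytorch_model" to "model" on the
        # safetensors side; diffusers components keep their stem unchanged
        if stem.endswith('pytorch_model' ) and not stem.endswith('diffusion_pytorch_model' ):
            stem = stem[: -len('pytorch_model' )] + 'model'
        if stem + '.safetensors' not in filenames:
            return False
    return True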
| 53
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig ( PretrainedConfig ):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=5_0_2_5_7 , max_position_embeddings=2_0_4_8 , hidden_size=2_0_4_8 , num_layers=2_4 , attention_types=[[["global", "local"], 1_2]] , num_heads=1_6 , intermediate_size=None , window_size=2_5_6 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                F"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
                F"`config.num_layers = {self.num_layers}`. "
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold( input , dimension , size , step ):
    '''simple docstring'''
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    '''simple docstring'''
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="""floor""" )
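# Worked example for the block-size helper above: with seq_length=12 and
# window_size=8 the candidates are 1..7, the divisors of 12 among them are
# [1, 2, 3, 4, 6], so the helper returns block length 6 and 12 // 6 = 2 blocks.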
class GPTNeoOnnxConfig ( OnnxConfigWithPast ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_attention_heads( self ) -> int:
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 1_3
| 448
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 40_96,
"""google/bigbird-roberta-large""": 40_96,
"""google/bigbird-base-trivia-itc""": 40_96,
}
class BigBirdTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
def __init__( self , a , a="<unk>" , a="<s>" , a="</s>" , a="<pad>" , a="[SEP]" , a="[MASK]" , a="[CLS]" , a = None , **a , ):
snake_case__ : str =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
snake_case__ : Optional[int] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
snake_case__ : Any =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
snake_case__ : List[str] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
snake_case__ : int =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
snake_case__ : Tuple =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : Union[str, Any] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
snake_case__ : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , sep_token=a , mask_token=a , cls_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
snake_case__ : Optional[int] =vocab_file
snake_case__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def lowercase__ ( self , text ):
return self.sp_model.encode(text , out_type=str )
def lowercase__ ( self , a ):
return self.sp_model.piece_to_id(a )
def lowercase__ ( self , a ):
snake_case__ : Dict =self.sp_model.IdToPiece(a )
return token
def lowercase__ ( self , tokens ):
snake_case__ : Optional[int] =[]
snake_case__ : Dict =""""""
snake_case__ : Dict =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
snake_case__ : Optional[Any] =True
snake_case__ : Dict =[]
else:
current_sub_tokens.append(token )
snake_case__ : List[Any] =False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def lowercase__ ( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ):
snake_case__ : List[str] =kwargs.pop("""use_source_tokenizer""" , False )
snake_case__ : int =self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case__ : List[Any] =[]
snake_case__ : int =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
snake_case__ : List[Any] =[]
sub_texts.append(token )
else:
current_sub_text.append(token )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case__ : Dict =re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(sub_texts ) )
else:
snake_case__ : Dict ="""""".join(sub_texts )
snake_case__ : str =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case__ : Any =self.clean_up_tokenization(text )
return clean_text
else:
return text
def lowercase__ ( self , save_directory , filename_prefix = None ):
if not os.path.isdir(save_directory ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case__ : Any =os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , """wb""" ) as fi:
snake_case__ : Any =self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def lowercase__ ( self , token_ids_a , token_ids_b = None ):
if token_ids_b is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Union[str, Any] =[self.cls_token_id]
snake_case__ : Dict =[self.sep_token_id]
return cls + token_ids_a + sep + token_ids_b + sep
def lowercase__ ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=token_ids_a , token_ids_b=token_ids_b , already_has_special_tokens=already_has_special_tokens )
if token_ids_b is None:
return [1] + ([0] * len(token_ids_a )) + [1]
return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
def lowercase__ ( self , token_ids_a , token_ids_b = None ):
snake_case__ : List[str] =[self.sep_token_id]
snake_case__ : List[Any] =[self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
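# Self-contained sketch of the SentencePiece round-trip the tokenizer above
# builds on; "spiece.model" is a placeholder path for any trained model file.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")
pieces = sp.encode("Hello world", out_type=str)  # text -> subword pieces
print(pieces)
print(sp.decode(pieces))                         # pieces -> text round-trip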
| 448
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__A = "bart"
__A = True
@st.cache(allow_output_mutation=True )
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__UpperCAmelCase =AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__UpperCAmelCase =qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase =(None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase =AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__UpperCAmelCase =AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__UpperCAmelCase =torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__UpperCAmelCase =sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase =make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def lowercase__ ( ) -> Any:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__UpperCAmelCase =faiss.StandardGpuResources()
__UpperCAmelCase =datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__UpperCAmelCase =np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase =faiss.IndexFlatIP(128 )
__UpperCAmelCase =faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
wikiaab_gpu_index_flat.add(wikiaab_passage_reps ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase =(None, None)
__UpperCAmelCase =Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def lowercase__ ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase =datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__UpperCAmelCase =elia["""train_eli5"""]
__UpperCAmelCase =np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase =faiss.IndexFlatIP(128 )
eli5_train_q_index.add(eli5_train_q_reps )
return (elia_train, eli5_train_q_index)
__A , __A , __A = load_indexes()
__A , __A , __A , __A = load_models()
__A , __A = load_train_data()
def lowercase__ ( question: Optional[Any] , n_results: Optional[Any]=10 ) -> Dict:
"""simple docstring"""
__UpperCAmelCase =embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
__UpperCAmelCase , __UpperCAmelCase =eli5_train_q_index.search(q_rep , n_results )
__UpperCAmelCase =[elia_train[int(i )] for i in I[0]]
return nn_examples
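# Minimal FAISS sketch of the dense nearest-question lookup above, using
# synthetic 128-d vectors in place of the real question embeddings.
import faiss
import numpy as np

index = faiss.IndexFlatIP(128)                      # exact max-inner-product index
index.add(np.random.rand(1000, 128).astype("float32"))
query = np.random.rand(1, 128).astype("float32")
D, I = index.search(query, 10)                      # scores and row ids of the top 10
print(I[0])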
def lowercase__ ( question: Union[str, Any] , source: Any="wiki40b" , method: Optional[int]="dense" , n_results: Any=10 ) -> int:
"""simple docstring"""
if source == "none":
__UpperCAmelCase , __UpperCAmelCase =(""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase =query_qa_dense_index(
question , qar_tokenizer , qar_model , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
else:
__UpperCAmelCase , __UpperCAmelCase =query_es_index(
question , es_client , index_name="""english_wiki40b_snippets_100w""" , n_results=n_results , )
__UpperCAmelCase =[
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__UpperCAmelCase ="""question: {} context: {}""".format(question , support_doc )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda A_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda A_ : None),
} )
def lowercase__ ( question_doc: Dict , sas_model: List[Any] , sas_tokenizer: Optional[Any] , min_len: Tuple=64 , max_len: int=256 , sampling: Dict=False , n_beams: Dict=2 , top_p: Dict=0.9_5 , temp: Optional[int]=0.8 ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
__UpperCAmelCase =qa_sas_generate(
question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
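# Hedged sketch of the generation step above against the public transformers
# API; "t5-small" stands in for the fine-tuned BART checkpoint, and the prompt
# mirrors make_support's "question: ... context: ..." format.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
inputs = tok("question: Why is the sky blue? context: ...", return_tensors="pt")
out = model.generate(**inputs, num_beams=2, min_length=8, max_length=64)
print(tok.decode(out[0], skip_special_tokens=True))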
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__A = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__A = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__A = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__A = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__A = st.sidebar.checkbox("Demo options")
if demo_options:
__A = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__A = action_list.index(action_st)
__A = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__A = show_type == "Show full text of passages"
else:
__A = 3
__A = True
__A = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__A = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.\n The answer is then generated by a sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__A = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__A = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__A = "wiki40b"
__A = "dense"
__A = "beam"
__A = 2
__A = 64
__A = 2_56
__A = None
__A = None
__A = st.sidebar.checkbox("Generation options")
if generate_options:
__A = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__A = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__A = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__A = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__A = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__A = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__A = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__A = None
# start main text
__A = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__A = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__A = st.text_input("Enter your question here:", "")
else:
__A = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__A , __A = make_support(question, source=wiki_source, method="dense", n_results=10)
__A , __A = make_support(question, source=wiki_source, method="sparse", n_results=10)
__A = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__A = support_list[:10]
__A = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__A , __A = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__A , __A = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__A = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__A = res[1].strip()
if sec_titles == "":
__A = "[{}]({})".format(res[0], wiki_url)
else:
__A = sec_titles.split(" & ")
__A = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__A = find_nearest_training(question)
__A = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__A = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__A = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 68
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __a ( _UpperCamelCase: str ) -> str:
"""simple docstring"""
return "".join(sorted(_UpperCamelCase ) )
def __a ( _UpperCamelCase: str ) -> list[str]:
"""simple docstring"""
return word_by_signature[signature(_UpperCamelCase )]
UpperCamelCase_ : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
UpperCamelCase_ : Dict = sorted({word.strip().lower() for word in data.splitlines()})
UpperCamelCase_ : Optional[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
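# Quick inline check of the idea above, independent of words.txt: words with
# the same sorted-letter signature are anagrams of one another.
assert signature("listen") == signature("silent") == "eilnst"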
if __name__ == "__main__":
UpperCamelCase_ : List[str] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 185
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase__ :
def __init__( self : List[str] , _lowercase : Dict , _lowercase : Optional[Any]=3 , _lowercase : str=32 , _lowercase : Dict=3 , _lowercase : Optional[int]=10 , _lowercase : Any=[8, 16, 32, 64] , _lowercase : str=[1, 1, 2, 1] , _lowercase : Union[str, Any]=True , _lowercase : Optional[int]=True , _lowercase : str="relu" , _lowercase : Any=3 , _lowercase : List[str]=None , _lowercase : List[Any]=["stage2", "stage3", "stage4"] , _lowercase : int=[2, 3, 4] , _lowercase : int=1 , ):
A = parent
A = batch_size
A = image_size
A = num_channels
A = embeddings_size
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = hidden_act
A = num_labels
A = scope
A = len(_lowercase )
A = out_features
A = out_indices
A = num_groups
def __a ( self : Tuple ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def __a ( self : Optional[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __a ( self : str , _lowercase : Any , _lowercase : Optional[int] , _lowercase : int ):
A = BitModel(config=_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self : Any , _lowercase : str , _lowercase : Tuple , _lowercase : Optional[Any] ):
A = self.num_labels
A = BitForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : int , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Any ):
A = BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A = None
A = BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self : Union[str, Any] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __a ( self : Dict ):
A = BitModelTester(self )
A = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def __a ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : Tuple ):
return
@unittest.skip(reason='Bit does not output attentions' )
def __a ( self : Tuple ):
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def __a ( self : int ):
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def __a ( self : Dict ):
pass
def __a ( self : str ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowercase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def __a ( self : Tuple ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __a ( self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
def __a ( self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(config=_lowercase )
for name, module in model.named_modules():
if isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def __a ( self : int ):
def check_hidden_states_output(_lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : List[str] ):
A = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A = layer_type
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def __a ( self : List[str] ):
pass
def __a ( self : Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def __a ( self : List[str] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = BitModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Dict ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __a ( self : Union[str, Any] ):
A = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowercase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=_lowercase , return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
A = model(**_lowercase )
# verify the logits
A = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
A = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@require_torch
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase = BitConfig
lowerCAmelCase = False
def __a ( self : List[str] ):
A = BitModelTester(self )
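# Hedged usage sketch for the backbone exercised by the tests above; the
# checkpoint name "google/bit-50" is an assumption for illustration.
import torch
from transformers import BitBackbone

backbone = BitBackbone.from_pretrained("google/bit-50", out_features=["stage2", "stage3"])
pixel_values = torch.rand(1, 3, 224, 224)
outputs = backbone(pixel_values)
print([fm.shape for fm in outputs.feature_maps])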
| 91
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : str , **_lowercase : Union[str, Any] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(_lowercase )
logger.warning(
f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' , self.tpu_name )
A = kwargs.pop('device_idx' , self.device_idx )
A = kwargs.pop('eager_mode' , self.eager_mode )
A = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**_lowercase )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Name of TPU"""} , )
lowerCAmelCase = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
lowerCAmelCase = field(default=UpperCAmelCase_ , metadata={"""help""": """Benchmark models in eager mode."""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def __a ( self : Optional[Any] ):
requires_backends(self , ['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def __a ( self : Dict ):
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
A = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def __a ( self : List[Any] ):
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def __a ( self : Optional[Any] ):
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def __a ( self : str ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def __a ( self : Any ):
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __a ( self : Dict ):
return self.n_gpu > 0
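# Standalone sketch of the device-selection pattern above in plain TensorFlow:
# pick one GPU when available, otherwise hide GPUs and stay on the CPU.
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.set_visible_devices(gpus[0], "GPU")
    strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
    tf.config.set_visible_devices([], "GPU")  # disable GPU
    strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
print(strategy)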
| 91
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = IFInpaintingPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase__ ( self : int ):
return self._get_dummy_components()
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : str=0 ):
if str(__UpperCamelCase ).startswith("""mps""" ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowerCamelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCamelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCamelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase__ ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowercase__ ( self : str ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowercase__ ( self : Tuple ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowercase__ ( self : Tuple ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowercase__ ( self : List[Any] ):
self._test_save_load_local()
def lowercase__ ( self : List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
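# Small sketch of the device-aware seeding used by get_dummy_inputs above:
# MPS takes the global torch seed, other devices a device-local Generator.
import torch

device = "cpu"  # assumption for the sketch
if str(device).startswith("mps"):
    generator = torch.manual_seed(0)
else:
    generator = torch.Generator(device=device).manual_seed(0)
print(generator.initial_seed())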
| 272
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase_ = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase_ = CLIPTextModel(__UpperCamelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("""RGB""" )
if str(__UpperCamelCase ).startswith("""mps""" ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowerCamelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = """french fries"""
lowerCamelCase_ = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = [inputs["""prompt"""]] * 2
lowerCamelCase_ = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
lowerCamelCase_ = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
lowerCamelCase_ = image / 2 + 0.5
lowerCamelCase_ = image.permute(0 , 3 , 1 , 2 )
lowerCamelCase_ = image.repeat(2 , 1 , 1 , 1 )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" ) )[0]
lowerCamelCase_ = components["""vae"""]
lowerCamelCase_ = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ = pipe(**__UpperCamelCase )[0]
lowerCamelCase_ = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , """passing latents as image input generates a different result from passing an image""" )
@slow
@require_torch_gpu
class __A( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] , __UpperCamelCase : Union[str, Any]=0 ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
lowerCamelCase_ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
lowerCamelCase_ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = 0
def callback_fn(__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor ) -> None:
lowerCamelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowerCamelCase_ = False
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self : int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ = inputs["""image"""].resize((5_0_4, 5_0_4) )
lowerCamelCase_ = """timbrooks/instruct-pix2pix"""
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCamelCase_ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
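# Hedged end-to-end sketch of the pipeline these tests cover, written against
# the public diffusers API; requires a CUDA GPU and downloads the checkpoint.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe("turn him into a cyborg", image=image, num_inference_steps=10,
              image_guidance_scale=1.0).images[0]
edited.save("cyborg.png")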
| 272
| 1
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __snake_case ():
"""simple docstring"""
lowerCamelCase_ : Dict = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ : str = Dataset.from_dict(__UpperCAmelCase )
return dataset
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = get_dataset()
lowerCamelCase_ : int = make_duplicate_clusters(UpperCamelCase_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Any = get_dataset()
lowerCamelCase_ : Optional[int] = deduplicate_dataset(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , 2 )
print(UpperCamelCase_ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , UpperCamelCase_ )
| 720
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowerCAmelCase__ :
def __init__( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]=14 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=99 , UpperCamelCase_ : List[Any]=32 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Optional[Any]=37 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Tuple=512 , UpperCamelCase_ : Tuple=0.02 , ) -> int:
"""simple docstring"""
lowerCamelCase_ : List[Any] = parent
lowerCamelCase_ : str = batch_size
lowerCamelCase_ : List[Any] = seq_length
lowerCamelCase_ : str = is_training
lowerCamelCase_ : Optional[Any] = use_input_mask
lowerCamelCase_ : Dict = use_token_type_ids
lowerCamelCase_ : Union[str, Any] = use_labels
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : int = rotary_dim
lowerCamelCase_ : List[str] = num_hidden_layers
lowerCamelCase_ : List[Any] = num_attention_heads
lowerCamelCase_ : Dict = intermediate_size
lowerCamelCase_ : Optional[Any] = hidden_act
lowerCamelCase_ : List[Any] = hidden_dropout_prob
lowerCamelCase_ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase_ : Union[str, Any] = max_position_embeddings
lowerCamelCase_ : Optional[int] = initializer_range
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Optional[int] = vocab_size - 1
lowerCamelCase_ : int = vocab_size - 1
lowerCamelCase_ : str = vocab_size - 1
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Dict = None
if self.use_input_mask:
lowerCamelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : int = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = config_and_inputs
lowerCamelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = 20
lowerCamelCase_ : Optional[Any] = model_class_name(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = model.init_cache(input_ids.shape[0] , UpperCamelCase_ )
lowerCamelCase_ : str = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCamelCase_ : int = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase_ : Union[str, Any] = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase_ : Optional[Any] = model(
input_ids[:, -1:] , attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : int = model(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : str = 20
lowerCamelCase_ : int = model_class_name(UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCamelCase_ : Any = model.init_cache(input_ids.shape[0] , UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase_ : Any = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase_ : List[str] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
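# Standalone sketch of the position-id construction shared by both cache checks
# above: broadcast a 0..L-2 range across the batch dimension.
import jax.numpy as jnp

input_ids = jnp.ones((2, 5), dtype="i4")
position_ids = jnp.broadcast_to(
    jnp.arange(input_ids.shape[-1] - 1)[None, :],
    (input_ids.shape[0], input_ids.shape[-1] - 1),
)
print(position_ids.shape)  # (2, 4)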
@require_flax
class lowerCAmelCase__ ( _lowerCAmelCase ,_lowerCAmelCase ,unittest.TestCase ):
A = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : int = FlaxGPTJModelTester(self )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@tooslow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Any = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
lowerCamelCase_ : int = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : str = model.config.eos_token_id
lowerCamelCase_ : Any = jax.jit(model.generate )
lowerCamelCase_ : Union[str, Any] = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCamelCase_ : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@is_pt_flax_cross_test
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase_ : Union[str, Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : str = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase_ : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ : Tuple = getattr(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ , lowerCamelCase_ : Any = pt_inputs['''input_ids'''].shape
lowerCamelCase_ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
lowerCamelCase_ : str = 0
lowerCamelCase_ : Dict = 1
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : Union[str, Any] = pt_model_class(UpperCamelCase_ ).eval()
lowerCamelCase_ : int = model_class(UpperCamelCase_ , dtype=jnp.floataa )
lowerCamelCase_ : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = fx_state
with torch.no_grad():
lowerCamelCase_ : Optional[int] = pt_model(**UpperCamelCase_ ).to_tuple()
lowerCamelCase_ : List[Any] = fx_model(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = model_class.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowerCamelCase_ : Dict = fx_model_loaded(**UpperCamelCase_ ).to_tuple()
self.assertEqual(
len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase_ : Optional[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase_ : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = pt_model_class(UpperCamelCase_ ).eval()
lowerCamelCase_ : str = model_class(UpperCamelCase_ , dtype=jnp.floataa )
lowerCamelCase_ : Tuple = load_flax_weights_in_pytorch_model(UpperCamelCase_ , fx_model.params )
lowerCamelCase_ , lowerCamelCase_ : List[Any] = pt_inputs['''input_ids'''].shape
lowerCamelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
lowerCamelCase_ : Dict = 0
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : str = 0
lowerCamelCase_ : List[str] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase_ : List[Any] = pt_model(**UpperCamelCase_ ).to_tuple()
lowerCamelCase_ : str = fx_model(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = pt_model_class.from_pretrained(UpperCamelCase_ , from_flax=UpperCamelCase_ )
with torch.no_grad():
lowerCamelCase_ : Tuple = pt_model_loaded(**UpperCamelCase_ ).to_tuple()
self.assertEqual(
len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
lowerCamelCase_ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
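# Note on the two cross-framework tests above: both follow the same recipe.
# Build the PyTorch and Flax models from one shared config, copy the weights
# across (convert_pytorch_state_dict_to_flax / load_flax_weights_in_pytorch_model),
# run both on identical inputs, and require the last-token logits to agree to
# within 4e-2, both directly and after a save/reload round trip.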
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCamelCase ( _A , _A , _A , _A = 100 , ) -> float:
lowercase : Optional[Any] = x_start
lowercase : str = fnc(_A )
lowercase : Union[str, Any] = 0.0
for _ in range(_A ):
# Approximates curve as a sequence of linear lines and sums their length
lowercase : Union[str, Any] = (x_end - x_start) / steps + xa
lowercase : Union[str, Any] = fnc(_A )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowercase : Any = xa
lowercase : str = fxa
return length
if __name__ == "__main__":
def UpperCamelCase ( _A ) -> Tuple:
return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
_lowerCAmelCase = 10
while i <= 10_00_00:
print(F'With {i} steps: {line_length(f, -10, 10, i)}')
i *= 10
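# Sanity-check sketch (not part of the original module): a straight line is its
# own chord, so for f(x) = x the estimate equals sqrt(2) over [0, 1] regardless
# of the step count, up to float rounding.
if __name__ == "__main__":
    assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-12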
"""simple docstring"""
def UpperCamelCase ( _A , _A ) -> None:
lowercase : List[Any] = len(_A )
print("""The following activities are selected:""" )
# The first activity is always selected
lowercase : Optional[int] = 0
print(_A , end=""",""" )
# Consider rest of the activities
for j in range(_A ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_A , end=""",""" )
lowercase : str = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = [1, 3, 0, 5, 8, 5]
_lowerCAmelCase = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
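# Sketch of a non-printing variant (a hypothetical helper, not in the original
# module) that returns the selected indices, which is easier to test:
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


if __name__ == "__main__":
    assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]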
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_UpperCamelCase = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_UpperCamelCase = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list[str]:
    """Splits a CamelCased name into its constituent words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _lowercase ( ):
__lowerCAmelCase : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__lowerCAmelCase : Any = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__lowerCAmelCase : int = collections.defaultdict(lowercase__ )
__lowerCAmelCase : Dict = collections.defaultdict(lowercase__ )
__lowerCAmelCase : List[Any] = collections.defaultdict(lowercase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowercase__ ):
__lowerCAmelCase : Optional[Any] = None
if _re_tf_models.match(lowercase__ ) is not None:
__lowerCAmelCase : Any = tf_models
__lowerCAmelCase : str = _re_tf_models.match(lowercase__ ).groups()[0]
elif _re_flax_models.match(lowercase__ ) is not None:
__lowerCAmelCase : int = flax_models
__lowerCAmelCase : Union[str, Any] = _re_flax_models.match(lowercase__ ).groups()[0]
elif _re_pt_models.match(lowercase__ ) is not None:
__lowerCAmelCase : Tuple = pt_models
__lowerCAmelCase : int = _re_pt_models.match(lowercase__ ).groups()[0]
if lookup_dict is not None:
while len(lowercase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__lowerCAmelCase : Union[str, Any] = True
break
# Try again after removing the last word in the name
__lowerCAmelCase : List[str] = ''''''.join(camel_case_split(lowercase__ )[:-1] )
__lowerCAmelCase : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__lowerCAmelCase : List[str] = list(lowercase__ )
all_models.sort()
__lowerCAmelCase : Tuple = {'''model_type''': all_models}
__lowerCAmelCase : Any = [pt_models[t] for t in all_models]
__lowerCAmelCase : int = [tf_models[t] for t in all_models]
__lowerCAmelCase : Union[str, Any] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__lowerCAmelCase : Tuple = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__lowerCAmelCase : str = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__lowerCAmelCase : Any = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__lowerCAmelCase : Optional[int] = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__lowerCAmelCase : Any = '''AutoTokenizer'''
__lowerCAmelCase : Dict = [processors[t] for t in all_models]
return pd.DataFrame(lowercase__ )
def _lowercase ( lowercase__ ):
__lowerCAmelCase : List[str] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__lowerCAmelCase : Any = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__lowerCAmelCase : Optional[Any] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase__ , lowercase__ , lowercase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase__ , lowercase__ ):
continue
# First extract all model_names
__lowerCAmelCase : Union[str, Any] = []
for name in getattr(lowercase__ , lowercase__ ).values():
if isinstance(lowercase__ , lowercase__ ):
model_names.append(lowercase__ )
else:
model_names.extend(list(lowercase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : int = get_frameworks_table()
__lowerCAmelCase : Union[str, Any] = Dataset.from_pandas(lowercase__ )
__lowerCAmelCase : List[Any] = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=lowercase__ )
__lowerCAmelCase : int = Dataset.from_json(lowercase__ )
__lowerCAmelCase : str = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(lowercase__ ) )
}
__lowerCAmelCase : Optional[Any] = update_pipeline_and_auto_class_table(lowercase__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__lowerCAmelCase : Optional[int] = sorted(table.keys() )
__lowerCAmelCase : Union[str, Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__lowerCAmelCase : Dict = Dataset.from_pandas(lowercase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(lowercase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__lowerCAmelCase : Optional[int] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__lowerCAmelCase : Union[str, Any] = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=lowercase__ , repo_type='''dataset''' , token=lowercase__ , commit_message=lowercase__ , )
def _lowercase ( ):
__lowerCAmelCase : Union[str, Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__lowerCAmelCase : Dict = transformers_module.pipelines.SUPPORTED_TASKS
__lowerCAmelCase : Tuple = []
for key in pipeline_tasks:
if key not in in_table:
__lowerCAmelCase : Optional[int] = pipeline_tasks[key]['''pt''']
if isinstance(lowercase__ , (list, tuple) ):
__lowerCAmelCase : int = model[0]
__lowerCAmelCase : int = model.__name__
if model not in in_table.values():
missing.append(lowercase__ )
if len(lowercase__ ) > 0:
__lowerCAmelCase : List[str] = ''', '''.join(lowercase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
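# Illustration (sketch): camel_case_split drives the backend lookup above by
# peeling trailing words off a class name until a known model prefix remains.
# Re-applying the same regex standalone:
#
#   matches = re.finditer(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "GPTJForCausalLM")
#   [m.group(0) for m in matches]  # -> ['GPTJ', 'For', 'Causal', 'LM']
#
# so "GPTJForCausalLM" is retried as "GPTJForCausal", "GPTJFor", "GPTJ", ...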
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
tokenizer_class = FunnelTokenizer
rust_tokenizer_class = FunnelTokenizerFast
_UpperCamelCase = True
_UpperCamelCase = True
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
super().setUp()
__lowerCAmelCase : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase__ ( self , **A_ ) ->str:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase__ ( self , **A_ ) ->Optional[int]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase__ ( self , A_ ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
__lowerCAmelCase : Optional[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : int = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
__lowerCAmelCase : List[Any] = tokenizer('''UNwant\u00E9d,running''' )
__lowerCAmelCase : List[str] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
__lowerCAmelCase : Union[str, Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
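# Note on the assertions above: Funnel uses an unusual token-type convention in
# which the leading <cls> token gets type 2, the first sequence gets type 0 and
# a second sequence gets type 1, i.e. [2] + [0] * len_a + [1] * len_b for a pair.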
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def a_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str]):
"""simple docstring"""
__UpperCAmelCase : Any = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset")
__UpperCAmelCase : List[Any] = VideoClassificationPipeline(model=UpperCamelCase_ , image_processor=UpperCamelCase_ , top_k=2)
__UpperCAmelCase : Optional[int] = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
for example in examples:
__UpperCAmelCase : List[str] = video_classifier(UpperCamelCase_)
self.assertEqual(
UpperCamelCase_ , [
{"score": ANY(UpperCamelCase_), "label": ANY(UpperCamelCase_)},
{"score": ANY(UpperCamelCase_), "label": ANY(UpperCamelCase_)},
] , )
@require_torch
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : str = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
__UpperCAmelCase : Tuple = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10})
__UpperCAmelCase : str = pipeline(
"video-classification" , model=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , frame_sampling_rate=4)
__UpperCAmelCase : Tuple = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset")
__UpperCAmelCase : List[str] = video_classifier(UpperCamelCase_ , top_k=2)
self.assertEqual(
nested_simplify(UpperCamelCase_ , decimals=4) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
__UpperCAmelCase : Union[str, Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase_ , decimals=4) , [
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] , )
@require_tf
def a_ ( self : str):
"""simple docstring"""
pass
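# Usage sketch outside the test harness (the tiny fixture checkpoint above is
# assumed; any video-classification checkpoint and a local .mp4 path work):
#
#   from transformers import pipeline
#   clf = pipeline("video-classification",
#                  model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification")
#   clf("archery.mp4", top_k=2)  # -> [{"score": ..., "label": ...}, ...]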
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
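# Downstream code should switch to the top-level import that the deprecation
# message above recommends:
#
#   from diffusers import FlaxStableDiffusionControlNetPipeline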
"""simple docstring"""
def _snake_case ( lowercase__ ):
_lowerCamelCase : Dict = [0] * len(lowercase__ )
_lowerCamelCase : int = []
_lowerCamelCase : Dict = [1] * len(lowercase__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase__ ) ):
if indegree[i] == 0:
queue.append(lowercase__ )
while queue:
_lowerCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
_lowerCamelCase : Optional[int] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase__ )
print(max(lowercase__ ) )
# Adjacency list of Graph
lowercase__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
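# For the graph above the longest chain is e.g. 0 -> 2 -> 5 -> 6 -> 7, which
# visits five vertices, so the call prints 5. A small optimization sketch:
# collections.deque makes the queue pops O(1) instead of list.pop(0)'s O(n).
#
#   from collections import deque
#   queue = deque()
#   ...
#   vertex = queue.popleft()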
"""simple docstring"""
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[str] = abs(lowercase__ )
_lowerCamelCase : Optional[int] = 0
while n > 0:
res += n % 10
n //= 10
return res
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[str] = abs(lowercase__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _snake_case ( lowercase__ ):
return sum(int(lowercase__ ) for c in str(abs(lowercase__ ) ) )
def _snake_case ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase__ , lowercase__ ) -> None:
_lowerCamelCase : int = f'''{func.__name__}({value})'''
_lowerCamelCase : Optional[Any] = timeit(f'''__main__.{call}''' , setup='import __main__' )
print(f'''{call:56} = {func(lowercase__ )} -- {timing:.4f} seconds''' )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(lowercase__ , lowercase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
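# Worked example: 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19, and all three
# implementations must agree on it.
if __name__ == "__main__":
    assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19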
class MaxFenwickTree:
    """Fenwick (binary indexed) tree supporting point updates and maximum
    queries over the half-open range [left, right)."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Sets arr[index] = value in O(log^2 n)."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers only itself.
                self.tree[index] = value
            else:
                # Recompute the maximum over the covered range
                # [current_left_border, index] (query is right-exclusive).
                self.tree[index] = max(self.arr[index], self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Returns max(arr[left:right]) in O(log^2 n)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
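# Usage sketch: point updates combined with half-open range-maximum queries.
if __name__ == "__main__":
    ft = MaxFenwickTree(5)
    ft.update(2, 20)
    ft.update(4, 10)
    assert ft.query(0, 5) == 20  # max over arr[0:5]
    assert ft.query(3, 5) == 10  # max over arr[3:5]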
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 'EncodecFeatureExtractor'
__UpperCamelCase = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self, A_, A_ ) -> Optional[int]:
super().__init__(A_, A_ )
UpperCAmelCase__ =self.feature_extractor
UpperCAmelCase__ =False
def __UpperCAmelCase ( self, A_=None, A_=None, A_=True ) -> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=A_, language=A_, no_timestamps=A_ )
def __call__( self, *A_, **A_ ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A_, **A_ )
UpperCAmelCase__ =kwargs.pop("audio", A_ )
UpperCAmelCase__ =kwargs.pop("sampling_rate", A_ )
UpperCAmelCase__ =kwargs.pop("text", A_ )
if len(A_ ) > 0:
UpperCAmelCase__ =args[0]
UpperCAmelCase__ =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
UpperCAmelCase__ =self.tokenizer(A_, **A_ )
if audio is not None:
UpperCAmelCase__ =self.feature_extractor(A_, *A_, sampling_rate=A_, **A_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
UpperCAmelCase__ =audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
UpperCAmelCase__ =audio_inputs["padding_mask"]
return inputs
def __UpperCAmelCase ( self, *A_, **A_ ) -> Dict:
UpperCAmelCase__ =kwargs.pop("audio", A_ )
UpperCAmelCase__ =kwargs.pop("padding_mask", A_ )
if len(A_ ) > 0:
UpperCAmelCase__ =args[0]
UpperCAmelCase__ =args[1:]
if audio_values is not None:
return self._decode_audio(A_, padding_mask=A_ )
else:
return self.tokenizer.batch_decode(*A_, **A_ )
def __UpperCAmelCase ( self, *A_, **A_ ) -> int:
return self.tokenizer.decode(*A_, **A_ )
def __UpperCAmelCase ( self, A_, A_ = None ) -> List[np.ndarray]:
UpperCAmelCase__ =to_numpy(A_ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =audio_values.shape
if padding_mask is None:
return list(A_ )
UpperCAmelCase__ =to_numpy(A_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
UpperCAmelCase__ =seq_len - padding_mask.shape[-1]
UpperCAmelCase__ =1 - self.feature_extractor.padding_value
UpperCAmelCase__ =np.pad(A_, ((0, 0), (0, difference)), "constant", constant_values=A_ )
UpperCAmelCase__ =audio_values.tolist()
for i in range(A_ ):
UpperCAmelCase__ =np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
UpperCAmelCase__ =sliced_audio.reshape(A_, -1 )
return audio_values
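# Usage sketch (names illustrative): run text and raw audio through the
# processor, then strip padding from generated audio with batch_decode():
#
#   inputs = processor(text=["a prompt"], audio=waveform, sampling_rate=24_000)
#   audio = processor.batch_decode(audio=generated_values,
#                                  padding_mask=inputs["padding_mask"])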
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCAmelCase_ = json.loads(snake_case_ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCAmelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
UpperCAmelCase_ = json.loads(snake_case_ )
if not mpi_options.get("sagemaker_mpi_enabled" , snake_case_ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __A ( UpperCamelCase__ ):
a__ : str = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def _lowercase (self : Dict ):
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , __a , )
@cached_property
def _lowercase (self : str ):
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
UpperCAmelCase_ = torch.device("cpu" )
UpperCAmelCase_ = 0
elif is_sagemaker_model_parallel_available():
UpperCAmelCase_ = smp.local_rank()
UpperCAmelCase_ = torch.device("cuda" , __a )
UpperCAmelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
UpperCAmelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
UpperCAmelCase_ = torch.device("cuda" , self.local_rank )
UpperCAmelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCAmelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCAmelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
UpperCAmelCase_ = torch.device("cuda" , self.local_rank )
UpperCAmelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(__a )
return device
@property
def _lowercase (self : List[Any] ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _lowercase (self : List[Any] ):
return not is_sagemaker_model_parallel_available()
@property
def _lowercase (self : List[str] ):
return False
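# Sketch of what the SageMaker detection above keys on: two environment
# variables that SageMaker sets for model-parallel jobs (values illustrative):
#
#   os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
#   os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
#
# With both set, is_sagemaker_model_parallel_available() returns True as long
# as the `smdistributed` package is importable.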
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour upsampling by 2x, followed by a 3x3 convolution.
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 convolution to match the channel count of the residual branch.
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the time embedding and broadcast it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
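# Smoke-test sketch (illustrative sizes; note the NHWC layout used here):
#
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   x = jnp.zeros((1, 16, 16, 32))
#   temb = jnp.zeros((1, 128))
#   params = block.init(jax.random.PRNGKey(0), x, temb)
#   y = block.apply(params, x, temb)  # shape (1, 16, 16, 64)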
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : int = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
UpperCamelCase_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _A ( self : List[str] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE : str = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=0 ):
if str(UpperCAmelCase_ ).startswith("mps" ):
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase_ , device=torch.device(UpperCAmelCase_ ) , )
SCREAMING_SNAKE_CASE : Tuple = floats_tensor(control_image.shape , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("RGB" ).resize((64, 64) )
SCREAMING_SNAKE_CASE : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def _A ( self : int ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _A ( self : Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(m):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.normal_(m.weight)
        m.bias.data.fill_(1.0)
SCREAMING_SNAKE_CASE : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE : Tuple = MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE : Optional[int] = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any=0 ):
if str(UpperCAmelCase_ ).startswith("mps" ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = 2
SCREAMING_SNAKE_CASE : Tuple = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase_ , device=torch.device(UpperCAmelCase_ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase_ , device=torch.device(UpperCAmelCase_ ) , ),
]
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor(control_image[0].shape , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("RGB" ).resize((64, 64) )
SCREAMING_SNAKE_CASE : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE : str = self.pipeline_class(**UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = 10.0
SCREAMING_SNAKE_CASE : Any = 4
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = steps
SCREAMING_SNAKE_CASE : int = scale
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = steps
SCREAMING_SNAKE_CASE : Any = scale
SCREAMING_SNAKE_CASE : List[str] = pipe(**UpperCAmelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = steps
SCREAMING_SNAKE_CASE : int = scale
SCREAMING_SNAKE_CASE : List[Any] = pipe(**UpperCAmelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = steps
SCREAMING_SNAKE_CASE : Dict = scale
SCREAMING_SNAKE_CASE : Dict = pipe(**UpperCAmelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def _A ( self : Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : List[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCAmelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=UpperCAmelCase_ , controlnet=UpperCAmelCase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : str = "evil space-punk bird"
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
SCREAMING_SNAKE_CASE : str = pipe(
UpperCAmelCase_ , UpperCAmelCase_ , control_image=UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="np" , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
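# Note on the guidance-window test above: control_guidance_start/end accept a
# scalar (broadcast to every ControlNet) or one value per ControlNet, e.g.
#
#   pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])
#
# keeps the first ControlNet active for 10-20% of the steps and the second for
# 30-70%, which is why all four outputs are required to differ.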
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q",
    "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G",
    "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X",
    "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Creates the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # Shift positions to 0-based indices.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could also be raised:
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')
        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # Any three distinct rotors work here.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
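# Because the reflector is an involution and each rotor pass is undone on the
# way back, running the machine twice with identical settings restores the
# original (uppercased) text, e.g.:
#
#   assert enigma(enigma("TEXT", (1, 1, 1)), (1, 1, 1)) == "TEXT"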
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE model for encoding images into discrete latents and decoding them back."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
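# Illustrative usage sketch (added for clarity, not part of the original file):
# round-trip a random image tensor through an untrained VQModel. Shapes assume
# the default config above; a trained checkpoint would instead be loaded with
# `VQModel.from_pretrained(...)`.
#
#     model = VQModel()
#     sample = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         latents = model.encode(sample).latents          # input to the quantizer
#         reconstruction = model.decode(latents).sample   # decoded image tensor
#     assert reconstruction.shape == sample.shape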
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
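# Illustrative usage sketch (added for clarity, not part of the original test
# file): the pipeline under test can also be exercised directly. The model id
# and fixture path are the same ones the tests above use.
#
#     from transformers import pipeline
#
#     vqa = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
#     preds = vqa(
#         image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#         question="How many cats are there?",
#         top_k=2,
#     )
#     # preds is a list of {"score": float, "answer": str} dicts, best answer first.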
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good predictions are given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time, in seconds, allowed for each candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) ,homepage="https://github.com/openai/human-eval" ,codebase_urls=["https://github.com/openai/human-eval"] ,reference_urls=["https://github.com/openai/human-eval"] ,license=_LICENSE ,)
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
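# Worked example (added for clarity): estimate_pass_at_k computes the unbiased
# estimator pass@k = 1 - C(n - c, k) / C(n, k) per task. With n = 2 samples,
# c = 1 correct, and k = 1 this gives 1 - C(1, 1) / C(2, 1) = 0.5, matching the
# docstring example above where one of two candidates passes.
#
#     print(estimate_pass_at_k(np.array([2]), np.array([1]), 1))  # -> [0.5]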
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Returns all rotations of the given string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows-Wheeler transform of the given string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows-Wheeler transform and returns the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or convertible to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
_A = """Provide a string that I will generate its BWT transform: """
_A = input(entry_msg).strip()
_A = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
_A = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
)
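# Deterministic example (added for clarity), complementing the interactive demo
# above:
#
#     result = bwt_transform("banana")
#     # result == {"bwt_string": "nnbaaa", "idx_original_string": 3}
#     assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "banana"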
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution=None) -> list:
    """Sorts ``arr`` by repeatedly pulling an ascending strand out of it and
    merging each strand into the solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
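# Trace of the first assert above (added for clarity): strand_sort pulls the
# ascending strand [4, 5] out of [4, 3, 5, 1, 2], then [3], then [1, 2],
# merging each strand into the growing solution:
#
#     [4, 5] -> merge [3] -> [3, 4, 5] -> merge [1, 2] -> [1, 2, 3, 4, 5]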
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=9_9 , __UpperCAmelCase=0 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase="last" , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=0 , ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = parent
lowerCAmelCase__ :Union[str, Any] = batch_size
lowerCAmelCase__ :Any = seq_length
lowerCAmelCase__ :Tuple = is_training
lowerCAmelCase__ :Dict = use_input_lengths
lowerCAmelCase__ :int = use_token_type_ids
lowerCAmelCase__ :Tuple = use_labels
lowerCAmelCase__ :List[Any] = gelu_activation
lowerCAmelCase__ :List[str] = sinusoidal_embeddings
lowerCAmelCase__ :Optional[Any] = causal
lowerCAmelCase__ :Optional[Any] = asm
lowerCAmelCase__ :Optional[Any] = n_langs
lowerCAmelCase__ :List[str] = vocab_size
lowerCAmelCase__ :Optional[int] = n_special
lowerCAmelCase__ :List[str] = hidden_size
lowerCAmelCase__ :int = num_hidden_layers
lowerCAmelCase__ :str = num_attention_heads
lowerCAmelCase__ :List[Any] = hidden_dropout_prob
lowerCAmelCase__ :Tuple = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = max_position_embeddings
lowerCAmelCase__ :Optional[int] = type_sequence_label_size
lowerCAmelCase__ :Any = initializer_range
lowerCAmelCase__ :Union[str, Any] = num_labels
lowerCAmelCase__ :Union[str, Any] = num_choices
lowerCAmelCase__ :Tuple = summary_type
lowerCAmelCase__ :int = use_proj
lowerCAmelCase__ :Any = scope
lowerCAmelCase__ :List[Any] = bos_token_id
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ :List[str] = None
if self.use_input_lengths:
lowerCAmelCase__ :Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase__ :Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :List[Any] = None
if self.use_labels:
lowerCAmelCase__ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ :Any = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ :Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case ( self ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = XLMModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :int = model(__UpperCAmelCase , lengths=__UpperCAmelCase , langs=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , langs=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = XLMWithLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = XLMForQuestionAnsweringSimple(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :str = model(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = XLMForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase )
lowerCAmelCase__ :str = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , p_mask=__UpperCAmelCase , )
lowerCAmelCase__ :int = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , )
((lowerCAmelCase__) , ) :Tuple = result_with_labels.to_tuple()
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
((lowerCAmelCase__) , ) :Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = XLMForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase )
lowerCAmelCase__ :str = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.num_labels
lowerCAmelCase__ :Any = XLMForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.num_choices
lowerCAmelCase__ :str = XLMForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
lowerCAmelCase__ :Dict = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowerCAmelCase__ :Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = XLMModelTester(self )
lowerCAmelCase__ :Tuple = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=1 ):
'''simple docstring'''
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(__UpperCAmelCase ) )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
lowerCAmelCase__ :Tuple = min_length + idx + 1
lowerCAmelCase__ :int = min_length + idx + 1
lowerCAmelCase__ :Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__UpperCAmelCase ) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=1 ):
'''simple docstring'''
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(__UpperCAmelCase ) , )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
lowerCAmelCase__ :Tuple = min_length + idx + 1
lowerCAmelCase__ :Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__UpperCAmelCase ) , )
pass
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :List[str] = XLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=__UpperCAmelCase ) # the president
lowerCAmelCase__ :Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowerCAmelCase__ :Tuple = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __UpperCAmelCase )
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.""")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")

        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"""=> File names {file_names}""")

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
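# Illustrative programmatic invocation (added for clarity; the dump path is a
# placeholder):
#
#     convert_slow_checkpoint_to_fast(
#         tokenizer_name="BertTokenizer",
#         checkpoint_name="bert-base-uncased",
#         dump_path="/tmp/fast_tokenizers",
#         force_download=False,
#     )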
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
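# Note (added for clarity): because of the _LazyModule registration above, the
# heavy torch/flax modeling code is only imported when one of these names is
# first accessed, e.g.:
#
#     from transformers.models.gpt_neo import GPTNeoConfig, GPTNeoModel
#
#     model = GPTNeoModel(GPTNeoConfig())  # randomly initialised model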
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'nvidia/segformer-b0-finetuned-ade-512-512': (
        'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
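# Illustrative usage sketch (added for clarity, not part of the original file):
#
#     config = SegformerConfig(num_labels=150)  # extra kwargs go to PretrainedConfig
#     onnx_config = SegformerOnnxConfig(config)
#     print(onnx_config.inputs)                 # axes metadata for "pixel_values"
#     print(onnx_config.atol_for_validation)    # 1e-04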
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ['text']
    outputs = ['audio']

    def setup(self):
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
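# Illustrative usage sketch (added for clarity, not part of the original file):
# PipelineTool instances are callable, running encode -> forward -> decode.
# Speaker embeddings default to an x-vector from the CMU ARCTIC dataset.
#
#     reader = TextToSpeechTool()
#     waveform = reader("Hello, world.")  # 1-D float tensor of audio samples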
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer using
    Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, '/'),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name='20220301.frr',
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name='20220301.frr',
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['train'], IterableDataset)
    assert next(iter(ds['train']))
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to torch.float16 for faster downloads and less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
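# Illustrative invocation (added for clarity; the script and file names are
# placeholders). Via fire, the function is exposed on the command line:
#
#     python convert_model_to_fp16.py /path/to/pytorch_model.bin
#
# or programmatically, halving the weights in place:
#
#     convert("/path/to/pytorch_model.bin")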
'''simple docstring'''
import os
def solution():
    """Find the greatest product of four adjacent numbers (right, down, or on
    either diagonal) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + '/grid.txt') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class lowercase_ :
"""simple docstring"""
def __init__( self : Tuple ) -> int:
_A = False
def __UpperCAmelCase ( self : Optional[int], UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : int, UpperCamelCase__ : Optional[Any] ) -> Any:
if not self.initialized:
_A = RagRetriever(
UpperCamelCase__, question_encoder_tokenizer=UpperCamelCase__, generator_tokenizer=UpperCamelCase__, index=UpperCamelCase__, init_retrieval=UpperCamelCase__, )
_A = True
def __UpperCAmelCase ( self : Any ) -> List[Any]:
self.retriever.index.init_index()
def __UpperCAmelCase ( self : int, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Dict ) -> Optional[Any]:
_A , _A = self.retriever._main_retrieve(UpperCamelCase__, UpperCamelCase__ )
return doc_ids, retrieved_doc_embeds
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : int=None ) -> Optional[int]:
if index is not None and index.is_initialized() and len(UpperCamelCase__ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
UpperCamelCase__, question_encoder_tokenizer=UpperCamelCase__, generator_tokenizer=UpperCamelCase__, index=UpperCamelCase__, init_retrieval=UpperCamelCase__, )
_A = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
for worker in self.retrieval_workers
] )
def __UpperCAmelCase ( self : Dict ) -> int:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Tuple ) -> Dict:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
_A = self.retrieval_workers[random.randint(0, len(self.retrieval_workers ) - 1 )]
_A , _A = ray.get(random_worker.retrieve.remote(UpperCamelCase__, UpperCamelCase__ ) )
else:
_A , _A = self._main_retrieve(UpperCamelCase__, UpperCamelCase__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase__ )
@classmethod
def __UpperCAmelCase ( cls : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : List[Any]=None, **UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
return super(UpperCamelCase__, cls ).get_tokenizers(UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__ )
@classmethod
def __UpperCAmelCase ( cls : Any, UpperCamelCase__ : Tuple, UpperCamelCase__ : int, UpperCamelCase__ : List[Any]=None, **UpperCamelCase__ : List[str] ) -> Tuple:
_A = kwargs.pop('config', UpperCamelCase__ ) or RagConfig.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
_A = RagTokenizer.from_pretrained(UpperCamelCase__, config=UpperCamelCase__ )
_A = rag_tokenizer.question_encoder
_A = rag_tokenizer.generator
if indexed_dataset is not None:
_A = 'custom'
_A = CustomHFIndex(config.retrieval_vector_size, UpperCamelCase__ )
else:
_A = cls._build_index(UpperCamelCase__ )
return cls(
UpperCamelCase__, question_encoder_tokenizer=UpperCamelCase__, generator_tokenizer=UpperCamelCase__, retrieval_workers=UpperCamelCase__, index=UpperCamelCase__, )
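# A minimal sketch of wiring the retriever to Ray actors; the actor count and
# the checkpoint name are assumptions for illustration:
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()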
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    """Choose a random element of the list as the pivot."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element of lst (1-indexed) using quickselect."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
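# A minimal usage sketch (k is 1-indexed; the partition drops values equal to the
# pivot, so the algorithm assumes distinct elements):
#
#   >>> kth_number([2, 1, 3, 4, 5], 3)
#   3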
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A_ = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
A_ = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep='\n')
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
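# A minimal sketch of running the check from the repo root; the script filename
# and the saved-model path are hypothetical placeholders:
#
#   python utils/check_tf_ops.py --saved_model_path ./saved_model/saved_model.pb --opset 12 --strict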
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
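# A minimal sketch of using the pipeline through the high-level factory; the
# checkpoint name is an assumption for illustration:
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{'generated_text': '...'}]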
'''simple docstring'''
from manim import *
class Stage5(Scene):
    def construct(self):
__lowercase = Rectangle(height=0.5 ,width=0.5 )
__lowercase = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 )
__lowercase = Rectangle(height=0.2_5 ,width=0.2_5 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*a_ ).arrange(a_ ,buff=0 )
__lowercase = VGroup(*a_ ).arrange(a_ ,buff=0 )
__lowercase = VGroup(a_ ,a_ ).arrange(a_ ,buff=0 )
__lowercase = Text('''CPU''' ,font_size=24 )
__lowercase = Group(a_ ,a_ ).arrange(a_ ,buff=0.5 ,aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*a_ ).arrange(a_ ,buff=0 )
__lowercase = Text('''GPU''' ,font_size=24 )
__lowercase = Group(a_ ,a_ ).arrange(a_ ,buff=0.5 ,aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*a_ ).arrange(a_ ,buff=0 )
__lowercase = Text('''Model''' ,font_size=24 )
__lowercase = Group(a_ ,a_ ).arrange(a_ ,buff=0.5 ,aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
__lowercase = []
__lowercase = []
for i, rect in enumerate(a_ ):
__lowercase = fill.copy().set_fill(a_ ,opacity=0.8 )
target.move_to(a_ )
model_arr.append(a_ )
__lowercase = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0.0 ).set_fill(a_ ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a_ )
self.add(*a_ ,*a_ )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*a_ ).arrange(a_ ,buff=0 )
__lowercase = VGroup(*a_ ).arrange(a_ ,buff=0 )
__lowercase = VGroup(a_ ,a_ ).arrange(a_ ,buff=0 )
__lowercase = Text('''Disk''' ,font_size=24 )
__lowercase = Group(a_ ,a_ ).arrange(a_ ,buff=0.5 ,aligned_edge=a_ )
disk.move_to([-4, -1.2_5, 0] )
self.add(a_ ,a_ )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(a_ ,a_ )
__lowercase = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" ,font_size=18 ,)
blue_text.next_to(a_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(a_ )
__lowercase = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(a_ ) )
__lowercase = Square(0.3 )
input.set_fill(a_ ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,a_ ,buff=0.5 )
self.play(Write(a_ ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=a_ ,buff=0.0_2 )
self.play(MoveToTarget(a_ ) )
self.play(FadeOut(a_ ) )
__lowercase = Arrow(start=a_ ,end=a_ ,color=a_ ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,a_ ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__lowercase = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(a_ ,run_time=3 ) )
__lowercase = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(a_ ) ,Circumscribe(model_arr[0] ,color=a_ ,**a_ ) ,Circumscribe(model_cpu_arr[0] ,color=a_ ,**a_ ) ,Circumscribe(gpu_rect[0] ,color=a_ ,**a_ ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
__lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 ,a_ ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
__lowercase = AnimationGroup(
FadeOut(a_ ,run_time=0.5 ) ,MoveToTarget(a_ ,run_time=0.5 ) ,FadeIn(a_ ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(a_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**a_ ) ,Circumscribe(cpu_left_col_base[i] ,**a_ ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=a_ ,**a_ ) ,Circumscribe(gpu_rect[0] ,color=a_ ,**a_ ) ,Circumscribe(model_arr[i + 1] ,color=a_ ,**a_ ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.0_2 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=a_ ,**a_ ) ,Circumscribe(cpu_left_col_base[-1] ,color=a_ ,**a_ ) ,Circumscribe(gpu_rect[0] ,color=a_ ,**a_ ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
__lowercase = a_c
__lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.0_2 ,buff=0.5 )
self.play(
FadeOut(a_ ) ,FadeOut(a_ ,run_time=0.5 ) ,)
__lowercase = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ ,run_time=3 ) ,MoveToTarget(a_ ) )
self.wait()
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
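# A minimal sketch of running the classification head; the checkpoint matches the
# doc constants above, while the image path is a hypothetical placeholder:
#
#   from PIL import Image
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#   image = Image.open("cat.jpg")
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(image, return_tensors="tf")
#   logits = model(**inputs).logits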
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
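# A minimal sketch of creating a 12-encoder / 3-decoder student from a BART
# teacher via fire; the checkpoint and layer counts are assumptions:
#
#   python make_student.py facebook/bart-large-cnn --save_path student_12_3 --e 12 --d 3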